Search | arXiv e-print repository
Showing 1–50 of 4,417 results for author: Zhu, X
type="text" value="Zhu, X"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zhu%2C+X&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zhu, X"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
1. arXiv:2411.14180 [physics.app-ph, cond-mat.mes-hall] (https://arxiv.org/abs/2411.14180)
Title: Unveiling Ultrafast Spin-Valley Dynamics and Phonon-Mediated Charge Transfer in MoSe$_{2}$/WSe$_{2}$ Heterostructures
Authors: Julian Wagner, Robin Bernhardt, Lukas Rieland, Omar Abdul-Aziz, Qiuyang Li, Xiaoyang Zhu, Stefano Dal Conte, Giulio Cerullo, Paul H. M. van Loosdrecht, Hamoon Hedayat
Abstract: We use helicity-resolved ultrafast transient absorption spectroscopy to study spin-valley polarization dynamics in a vertically stacked MoSe$_{2}$/WSe$_{2}$ heterostructure. The experimental findings reveal details of interlayer charge transfer on ultrafast timescales, showing that the spin-valley polarized state of photoexcited carriers is conserved during the charge transfer and formation of interlayer excitons. Our results confirm that phonon scattering mediates the interlayer charge transfer process, while a high phonon population at elevated temperatures causes a significant decrease in spin-valley selective charge transfer. Moreover, the experimental findings demonstrate the possibility that interlayer excitons and their spin-valley polarization can be probed in the optical response of intralayer excitons. These findings pave the way for ultrafast detection, control, and manipulation of spin-valley polarized excitons in transition metal dichalcogenide-based 2D heterostructures.
Submitted 21 November, 2024; originally announced November 2024.
2. arXiv:2411.13775 [cs.CL, cs.AI] (https://arxiv.org/abs/2411.13775)
Title: Benchmarking GPT-4 against Human Translators: A Comprehensive Evaluation Across Languages, Domains, and Expertise Levels
Authors: Jianhao Yan, Pingchuan Yan, Yulong Chen, Jing Li, Xianchao Zhu, Yue Zhang
Abstract: This study presents a comprehensive evaluation of GPT-4's translation capabilities compared to human translators of varying expertise levels. Through systematic human evaluation using the MQM schema, we assess translations across three language pairs (Chinese$\longleftrightarrow$English, Russian$\longleftrightarrow$English, and Chinese$\longleftrightarrow$Hindi) and three domains (News, Technology, and Biomedical). Our findings reveal that GPT-4 achieves performance comparable to junior-level translators in terms of total errors, while still lagging behind senior translators. Unlike traditional Neural Machine Translation systems, which show significant performance degradation in resource-poor language directions, GPT-4 maintains consistent translation quality across all evaluated language pairs. Through qualitative analysis, we identify distinctive patterns in translation approaches: GPT-4 tends toward overly literal translations and exhibits lexical inconsistency, while human translators sometimes over-interpret context and introduce hallucinations. This study represents the first systematic comparison between LLM and human translators across different proficiency levels, providing valuable insights into the current capabilities and limitations of LLM-based translation systems.
Submitted 20 November, 2024; originally announced November 2024.
Comments: Work in progress
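Note: the MQM (Multidimensional Quality Metrics) evaluation referenced in this abstract scores each translation by counting annotated errors weighted by severity. A minimal sketch of such a scorer, assuming the commonly used MQM severity weights and per-1000-words normalization (the weights, category names, and normalization are assumptions, not details from the paper):

```python
# Hypothetical MQM-style scorer (illustrative; not the paper's code).
SEVERITY_WEIGHTS = {"neutral": 0.0, "minor": 1.0, "major": 5.0, "critical": 10.0}

def mqm_score(errors, word_count):
    """Weighted error total, normalized per 1000 words; lower is better."""
    total = sum(SEVERITY_WEIGHTS[severity] for _category, severity in errors)
    return 1000.0 * total / word_count

sample_errors = [("accuracy/mistranslation", "major"), ("terminology", "minor")]
print(mqm_score(sample_errors, word_count=250))  # -> 24.0
```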
3. arXiv:2411.13362 [eess.IV, cs.CV] (https://arxiv.org/abs/2411.13362)
Title: RTSR: A Real-Time Super-Resolution Model for AV1 Compressed Content
Authors: Yuxuan Jiang, Jakub Nawała, Chen Feng, Fan Zhang, Xiaoqing Zhu, Joel Sole, David Bull
Abstract: Super-resolution (SR) is a key technique for improving the visual quality of video content by increasing its spatial resolution while reconstructing fine details. SR has been employed in many applications including video streaming, where compressed low-resolution content is typically transmitted to end users and then reconstructed with a higher resolution and enhanced quality. To support real-time playback, it is important to implement fast SR models while preserving reconstruction quality; however, most existing solutions, in particular those based on complex deep neural networks, fail to do so. To address this issue, this paper proposes a low-complexity SR method, RTSR, designed to enhance the visual quality of compressed video content, focusing on resolution up-scaling from a) 360p to 1080p and b) 540p to 4K. The proposed approach utilizes a CNN-based network architecture, which was optimized for AV1 (SVT)-encoded content at various quantization levels based on a dual-teacher knowledge distillation method. This method was submitted to the AIM 2024 Video Super-Resolution Challenge, specifically targeting the Efficient/Mobile Real-Time Video Super-Resolution competition. It achieved the best trade-off between complexity and coding performance (measured in PSNR, SSIM and VMAF) among all six submissions. The code will be available soon.
Submitted 20 November, 2024; originally announced November 2024.
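Note: dual-teacher knowledge distillation, as referenced in this abstract, typically trains a compact student network against both the ground truth and the outputs of two stronger teacher models. A minimal sketch of such a combined loss, assuming simple L1 terms and fixed weights (the function name and weights are hypothetical, not RTSR's actual recipe):

```python
import torch.nn.functional as F

def dual_teacher_distillation_loss(student_sr, teacher1_sr, teacher2_sr,
                                   ground_truth, alpha=1.0, beta=0.25):
    """L1 loss against the ground-truth frame, plus L1 distillation terms
    pulling the student toward each teacher's super-resolved output.
    alpha/beta are assumed weights, not values from the paper."""
    supervised = F.l1_loss(student_sr, ground_truth)
    distill = (F.l1_loss(student_sr, teacher1_sr)
               + F.l1_loss(student_sr, teacher2_sr))
    return alpha * supervised + beta * distill
```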
4. arXiv:2411.13110 [nucl-th, nucl-ex] (https://arxiv.org/abs/2411.13110)
Title: Exploring hadron-quark phase transition in heavy-ion collisions using particle emission ratios in heavy and light reaction systems
Authors: Xun Zhu, Gao-Chan Yong
Abstract: Based on the AMPT model, which incorporates both hadronic and quark degrees of freedom, we studied the production of lambdas, kaons, protons, and pions in the reaction systems $^{40}$Ca+$^{40}$Ca, $^{48}$Ca+$^{48}$Ca, and $^{197}$Au+$^{197}$Au. It is found that the ratios of identical-particle emissions from heavy and light reaction systems, especially the emission ratios of the strange particles $\Lambda^{0}$ or K$^{+}$, are highly sensitive to the hadron-quark phase transition in heavy-ion collisions. Detailed explanations of these results and validations using the PACIAE model are given.
Submitted 20 November, 2024; originally announced November 2024.
Comments: 5 pages, 5 figures, submitted
5. arXiv:2411.12783 [cs.CV] (https://arxiv.org/abs/2411.12783)
Title: Med-2E3: A 2D-Enhanced 3D Medical Multimodal Large Language Model
Authors: Yiming Shi, Xun Zhu, Ying Hu, Chenyi Guo, Miao Li, Ji Wu
Abstract: The analysis of 3D medical images is crucial for modern healthcare, yet traditional task-specific models are becoming increasingly inadequate due to limited generalizability across diverse clinical scenarios. Multimodal large language models (MLLMs) offer a promising solution to these challenges. However, existing MLLMs have limitations in fully leveraging the rich, hierarchical information embedded in 3D medical images. Inspired by clinical practice, where radiologists focus on both 3D spatial structure and 2D planar content, we propose Med-2E3, a novel MLLM for 3D medical image analysis that integrates 3D and 2D encoders. To aggregate 2D features more effectively, we design a Text-Guided Inter-Slice (TG-IS) scoring module, which scores the attention of each 2D slice based on slice contents and task instructions. To the best of our knowledge, Med-2E3 is the first MLLM to integrate both 3D and 2D features for 3D medical image analysis. Experiments on a large-scale, open-source 3D medical multimodal benchmark demonstrate that Med-2E3 exhibits task-specific attention distribution and significantly outperforms current state-of-the-art models, with a 14% improvement in report generation and a 5% gain in medical visual question answering (VQA), highlighting the model's potential in addressing complex multimodal clinical tasks. The code will be released upon acceptance.
Submitted 19 November, 2024; originally announced November 2024.
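Note: the TG-IS module described above scores each 2D slice by its relevance to the task instruction. One way such text-guided scoring could work is cosine similarity between a text embedding and per-slice embeddings, followed by a softmax (a hypothetical sketch; the shapes and aggregation are illustrative assumptions, not Med-2E3's implementation):

```python
import torch
import torch.nn.functional as F

def text_guided_slice_scores(slice_embs, text_emb):
    """slice_embs: (num_slices, dim); text_emb: (dim,).
    Returns a softmax-normalized attention weight per slice."""
    sims = F.normalize(slice_embs, dim=-1) @ F.normalize(text_emb, dim=-1)
    return torch.softmax(sims, dim=0)

slice_embs = torch.randn(32, 512)                      # 32 slice embeddings
scores = text_guided_slice_scores(slice_embs, torch.randn(512))
aggregated = (scores.unsqueeze(-1) * slice_embs).sum(dim=0)  # text-weighted 2D feature
```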
6. arXiv:2411.12604 [cs.CV] (https://arxiv.org/abs/2411.12604)
Title: SG-LRA: Self-Generating Automatic Scoliosis Cobb Angle Measurement with Low-Rank Approximation
Authors: Zhiwen Shao, Yichen Yuan, Lizhuang Ma, Dit-Yan Yeung, Xiaojia Zhu
Abstract: Automatic Cobb angle measurement from X-ray images is crucial for scoliosis screening and diagnosis. However, most existing regression-based and segmentation-based methods struggle with inaccurate spine representations or mask connectivity/fragmentation issues, and landmark-based methods suffer from insufficient training data and annotations. To address these challenges, we propose a novel framework comprising a Self-Generation pipeline and a Low-Rank Approximation representation (SG-LRA) for automatic Cobb angle measurement. Specifically, we propose a parameterized spine contour representation based on LRA, which enables eigen-spine decomposition and spine contour reconstruction. We can directly obtain the spine contour from only the regressed LRA coefficients, which form a more accurate spine representation than rectangular boxes. We also combine LRA coefficient regression with anchor-box classification to solve inaccurate predictions and mask connectivity issues. Moreover, we develop a data engine with automatic annotation and automatic selection in an iterative manner, trained on a private Spinal2023 dataset. With this data engine, we generate the largest scoliosis X-ray dataset, named Spinal-AI2024, largely without privacy leaks. Extensive experiments on the public AASCE2019, private Spinal2023, and generated Spinal-AI2024 datasets demonstrate that our method achieves state-of-the-art Cobb angle measurement performance. Our code and the Spinal-AI2024 dataset are available at https://github.com/Ernestchenchen/SG-LRA and https://github.com/Ernestchenchen/Spinal-AI2024, respectively.
Submitted 19 November, 2024; originally announced November 2024.
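Note: the eigen-spine idea above, representing a spine contour by coefficients in a low-rank basis learned from training contours, is in essence a PCA-style reconstruction. A minimal sketch under that assumption (the rank, landmark count, and function names are hypothetical):

```python
import numpy as np

def fit_lra_basis(contours, rank=24):
    """contours: (N, 2K) flattened (x, y) landmark vectors.
    Returns the mean contour and the top-`rank` principal directions."""
    mean = contours.mean(axis=0)
    _, _, vt = np.linalg.svd(contours - mean, full_matrices=False)
    return mean, vt[:rank]

def reconstruct(coeffs, mean, basis):
    """Rebuild a spine contour from regressed low-rank coefficients."""
    return mean + coeffs @ basis

contours = np.random.rand(100, 136)         # e.g. 68 contour points per spine
mean, basis = fit_lra_basis(contours)
coeffs = (contours[0] - mean) @ basis.T     # project one contour onto the basis
approx = reconstruct(coeffs, mean, basis)   # low-rank approximation of it
```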
7. arXiv:2411.12520 [cs.RO, cs.CV] (https://arxiv.org/abs/2411.12520)
Title: VMGNet: A Low Computational Complexity Robotic Grasping Network Based on VMamba with Multi-Scale Feature Fusion
Authors: Yuhao Jin, Qizhong Gao, Xiaohui Zhu, Yong Yue, Eng Gee Lim, Yuqing Chen, Prudence Wong, Yijie Chu
Abstract: While deep learning-based robotic grasping technology has demonstrated strong adaptability, its computational complexity has also significantly increased, making it unsuitable for scenarios with high real-time requirements. Therefore, we propose a low-complexity, high-accuracy model named VMGNet for robotic grasping. For the first time, we introduce the Visual State Space into the robotic grasping field to achieve linear computational complexity, thereby greatly reducing the model's computational cost. Meanwhile, to improve accuracy, we propose an efficient and lightweight multi-scale feature fusion module, named the Fusion Bridge Module, to extract and fuse information at different scales. We also present a new loss function calculation method to enhance the importance differences between subtasks, improving the model's fitting ability. Experiments show that VMGNet requires only 8.7 GFLOPs and has an inference time of 8.1 ms on our devices. VMGNet also achieves state-of-the-art performance on the Cornell and Jacquard public datasets. To validate VMGNet's effectiveness in practical applications, we conducted real grasping experiments in multi-object scenarios, where VMGNet achieved an excellent 94.4% success rate in real-world grasping tasks. A video of the real-world robotic grasping experiments is available at https://youtu.be/S-QHBtbmLc4.
Submitted 19 November, 2024; originally announced November 2024.
8. arXiv:2411.12178 [hep-ex] (https://arxiv.org/abs/2411.12178)
Title: First evidence for direct CP violation in beauty to charmonium decays
Authors: LHCb collaboration: R. Aaij, A. S. W. Abdelmotteleb, C. Abellan Beteta, F. Abudinén, T. Ackernley, A. A. Adefisoye, B. Adeva, M. Adinolfi, P. Adlarson, C. Agapopoulou, C. A. Aidala, Z. Ajaltouni, S. Akar, K. Akiba, P. Albicocco, J. Albrecht, F. Alessio, M. Alexander, Z. Aliouche, P. Alvarez Cartelle, R. Amalric, S. Amato, J. L. Amey, Y. Amhis, et al. (1127 additional authors not shown)
Abstract: The $C\!P$ asymmetry and branching fraction of the CKM-suppressed decay $B^+ \to J/\psi\,\pi^+$ are precisely measured relative to the favoured decay $B^+ \to J/\psi\,K^+$, using a sample of proton-proton collision data corresponding to an integrated luminosity of $5.4\ \text{fb}^{-1}$ recorded at a center-of-mass energy of 13 TeV during 2016-2018. The results for the $C\!P$ asymmetry difference and branching fraction ratio are
\begin{align*} \Delta\mathcal{A}^{C\!P} \equiv \mathcal{A}^{C\!P}(B^+ \to J/\psi\,\pi^+) - \mathcal{A}^{C\!P}(B^+ \to J/\psi\,K^+) = (1.29 \pm 0.49 \pm 0.08) \times 10^{-2} \end{align*}
and
\begin{equation*} \mathcal{R}_{\pi/K} \equiv \frac{\mathcal{B}(B^+ \to J/\psi\,\pi^+)}{\mathcal{B}(B^+ \to J/\psi\,K^+)} = (3.852 \pm 0.022 \pm 0.018) \times 10^{-2}, \end{equation*}
where the first uncertainties are statistical and the second systematic. A combination with previous LHCb results based on data collected at 7 and 8 TeV in 2011 and 2012 yields $\Delta\mathcal{A}^{C\!P} = (1.42 \pm 0.43 \pm 0.08) \times 10^{-2}$ and $\mathcal{R}_{\pi/K} = (3.846 \pm 0.018 \pm 0.018) \times 10^{-2}$. The combined $\Delta\mathcal{A}^{C\!P}$ value deviates from zero by 3.2 standard deviations, providing the first evidence for direct $C\!P$ violation in the amplitudes of beauty decays to charmonium final states.
Submitted 18 November, 2024; originally announced November 2024.
Comments: 18 pages, 2 figures, no conference or journal information. All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/1623/ (LHCb public pages)
Report number: LHCb-PAPER-2024-031, CERN-EP-2024-286
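Note: the quoted 3.2 standard deviations follow from dividing the combined central value by its total uncertainty, assuming the statistical and systematic uncertainties add in quadrature:
\[
\frac{\Delta\mathcal{A}^{C\!P}}{\sigma_{\text{tot}}}
= \frac{1.42 \times 10^{-2}}{\sqrt{(0.43)^{2} + (0.08)^{2}} \times 10^{-2}}
= \frac{1.42}{0.437} \approx 3.2 .
\]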
9. arXiv:2411.11648 [hep-ex, hep-ph] (https://arxiv.org/abs/2411.11648)
Title: Evidence for Two Excited $\Omega^{-}$ Hyperons
Authors: BESIII Collaboration: M. Ablikim, M. N. Achasov, P. Adlarson, O. Afedulidis, X. C. Ai, R. Aliberti, A. Amoroso, Y. Bai, O. Bakina, I. Balossino, Y. Ban, H.-R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, A. Brueggemann, et al. (650 additional authors not shown)
Abstract: Using $e^+e^-$ collision data corresponding to an integrated luminosity of 19 fb$^{-1}$ collected by the BESIII detector at center-of-mass energies ranging from 4.13 to 4.70 GeV, we report the first evidence for a new excited $\Omega^{-}$ hyperon, the $\Omega^*(2109)^{-}$, through the process $e^+ e^- \to \Omega^*(2109)^{-} \bar\Omega^{+} + c.c.$ with a significance of 3.7$\sigma$. The mass and width of the $\Omega^*(2109)^{-}$ are measured to be $2108.8 \pm 5.5_{\rm stat} \pm 1.5_{\rm syst}~{\rm MeV}/c^{2}$ and $21.6 \pm 17.7_{\rm stat} \pm 9.4_{\rm syst}~{\rm MeV}$, respectively. We also present evidence for production of the $\Omega^*(2012)^{-}$ in the process $e^+ e^- \to \Omega^*(2012)^{-} \bar\Omega^{+} + c.c.$ with a significance of 3.7$\sigma$.
Submitted 18 November, 2024; originally announced November 2024.
Comments: 8 pages, 2 figures
10. arXiv:2411.11595 [hep-ph] (https://arxiv.org/abs/2411.11595)
Title: Threshold Resummation for Semi-Inclusive Single-Hadron Production with Effective Field Theory
Authors: Zhen Xu, Hua Xing Zhu
Abstract: Large double-logarithmic corrections are induced by soft gluon emissions near threshold in semi-inclusive $e^+e^-$ annihilation (SIA) distributions, and must be resummed to all orders in perturbation theory for reliable theoretical predictions. Building on the strategy developed for threshold resummation of the DIS structure function in momentum space using soft-collinear effective theory (SCET), we present the explicit formalism for the SIA cross section. We then perform the resummation directly in momentum space for $\gamma^* \to q \bar q$, $H \to gg$, and $H \to b\bar b$ to N$^4$LL accuracy and demonstrate good convergence. We anticipate that these results will benefit the extraction of the light-quark, heavy-quark, and gluon fragmentation functions.
Submitted 18 November, 2024; originally announced November 2024.
11. arXiv:2411.11543 [cs.CV, cs.AI] (https://arxiv.org/abs/2411.11543)
Title: Enhancing Vision-Language Model Safety through Progressive Concept-Bottleneck-Driven Alignment
Authors: Zhendong Liu, Yuanbi Nie, Yingshui Tan, Xiangyu Yue, Qiushi Cui, Chongjun Wang, Xiaoyong Zhu, Bo Zheng
Abstract: Benefiting from the powerful capabilities of Large Language Models (LLMs), pre-trained visual encoder models connected to LLMs form Vision Language Models (VLMs). However, recent research shows that the visual modality in VLMs is highly vulnerable, allowing attackers to bypass safety alignment in LLMs through visually transmitted content and launch harmful attacks. To address this challenge, we propose a progressive concept-based alignment strategy, PSA-VLM, which incorporates safety modules as concept bottlenecks to enhance visual-modality safety alignment. By aligning model predictions with specific safety concepts, we improve defenses against risky images, enhancing explainability and controllability while minimally impacting general performance. Our method uses two-stage training: the first stage brings very effective performance improvement at low computational cost, and fine-tuning the language model in the second stage further improves safety performance. Our method achieves state-of-the-art results on popular VLM safety benchmarks.
Submitted 18 November, 2024; originally announced November 2024.
Comments: arXiv admin note: substantial text overlap with arXiv:2405.13581
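Note: a concept-bottleneck safety module of the kind described above routes visual features through a small set of interpretable safety-concept predictions before any risk decision is made. A minimal sketch (the concept list, layer sizes, and wiring are assumptions, not PSA-VLM's actual architecture):

```python
import torch
import torch.nn as nn

class ConceptBottleneckSafetyHead(nn.Module):
    """Map visual features to interpretable safety-concept logits,
    then predict an overall risk score from those concepts alone."""
    def __init__(self, feat_dim=768,
                 concepts=("violence", "self-harm", "nudity", "weapons")):
        super().__init__()
        self.concept_names = concepts
        self.to_concepts = nn.Linear(feat_dim, len(concepts))  # the bottleneck
        self.to_risk = nn.Linear(len(concepts), 1)             # risk sees only concepts

    def forward(self, visual_feats):
        concept_logits = self.to_concepts(visual_feats)  # supervised with concept labels
        risk = torch.sigmoid(self.to_risk(torch.sigmoid(concept_logits)))
        return concept_logits, risk

head = ConceptBottleneckSafetyHead()
concept_logits, risk = head(torch.randn(2, 768))  # batch of 2 image features
```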
12. arXiv:2411.11069 [cs.CV] (https://arxiv.org/abs/2411.11069)
Title: Skeleton-Guided Spatial-Temporal Feature Learning for Video-Based Visible-Infrared Person Re-Identification
Authors: Wenjia Jiang, Xiaoke Zhu, Jiakang Gao, Di Liao
Abstract: Video-based visible-infrared person re-identification (VVI-ReID) is challenging due to significant modality feature discrepancies. Spatial-temporal information in videos is crucial, but its accuracy is often affected by low video quality and occlusions. Existing methods mainly focus on reducing modality differences but pay limited attention to improving spatial-temporal features, particularly for infrared videos. To address this, we propose a novel Skeleton-guided spatial-Temporal feAture leaRning (STAR) method for VVI-ReID. By using skeleton information, which is robust to issues such as poor image quality and occlusions, STAR improves the accuracy of spatial-temporal features in videos of both modalities. Specifically, STAR employs skeleton-guided strategies at two levels: frame level and sequence level. At the frame level, robust structured skeleton information is used to refine the visual features of individual frames. At the sequence level, we design a feature aggregation mechanism based on a skeleton key-point graph, which learns the contribution of different body parts to spatial-temporal features, further enhancing the accuracy of global features. Experiments on benchmark datasets demonstrate that STAR outperforms state-of-the-art methods. The code will be open-sourced soon.
Submitted 17 November, 2024; originally announced November 2024.
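Note: the sequence-level mechanism described above learns how much each body part contributes to the aggregated feature. A minimal sketch of learned per-joint attention over key-point features (a hypothetical illustration, not the STAR implementation):

```python
import torch
import torch.nn as nn

class KeypointWeightedAggregation(nn.Module):
    """Aggregate per-joint features with learned contribution weights."""
    def __init__(self, feat_dim=256):
        super().__init__()
        self.score = nn.Linear(feat_dim, 1)  # one scalar score per joint feature

    def forward(self, joint_feats):  # joint_feats: (batch, num_joints, feat_dim)
        weights = torch.softmax(self.score(joint_feats), dim=1)  # per-joint contribution
        return (weights * joint_feats).sum(dim=1)                # (batch, feat_dim)

agg = KeypointWeightedAggregation()
global_feat = agg(torch.randn(4, 17, 256))  # e.g. 17 skeleton key points
```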
At the frame level, the robust structured skeleton information is used to refine the visual features of individual frames. At the sequence level, we design a feature aggregation mechanism based on a skeleton key-point graph, which learns the contribution of different body parts to spatial-temporal features, further enhancing the accuracy of global features. Experiments on benchmark datasets demonstrate that STAR outperforms state-of-the-art methods. Code will be open-sourced soon. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11069v1-abstract-full').style.display = 'none'; document.getElementById('2411.11069v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10667">arXiv:2411.10667</a> <span> [<a href="https://arxiv.org/pdf/2411.10667">pdf</a>, <a href="https://arxiv.org/ps/2411.10667">ps</a>, <a href="https://arxiv.org/format/2411.10667">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Pattern Formation and Solitons">nlin.PS</span> </div> </div> <p class="title is-5 mathjax"> Solitons in composite linear-nonlinear moiré lattices </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zeng%2C+L">Liangwei Zeng</a>, <a href="/search/?searchtype=author&query=Malomed%2C+B+A">Boris A. Malomed</a>, <a href="/search/?searchtype=author&query=Mihalache%2C+D">Dumitru Mihalache</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jingzhen Li</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xing Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10667v1-abstract-short" style="display: inline;"> We produce families of two-dimensional gap solitons (GSs) maintained by moiré lattices (MLs) composed of linear and nonlinear sublattices, with the defocusing sign of the nonlinearity. Depending on the angle between the sublattices, the ML may be quasiperiodic or periodic, composed of mutually incommensurate or commensurate sublattices, respectively (in the latter case, the inter-lattice angle cor… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10667v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10667v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10667v1-abstract-full" style="display: none;"> We produce families of two-dimensional gap solitons (GSs) maintained by moiré lattices (MLs) composed of linear and nonlinear sublattices, with the defocusing sign of the nonlinearity. Depending on the angle between the sublattices, the ML may be quasiperiodic or periodic, composed of mutually incommensurate or commensurate sublattices, respectively (in the latter case, the inter-lattice angle corresponds to Pythagorean triples). 
The GSs include fundamental, quadrupole, and octupole solitons, as well as quadrupoles and octupoles carrying unitary vorticity. Stability segments of the GS families are identified by means of the linearized equation for small perturbations, and confirmed by direct simulations of perturbed evolution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10667v1-abstract-full').style.display = 'none'; document.getElementById('2411.10667v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 figures, to be published in Optics Letters (2024)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Optics Letters, (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10442">arXiv:2411.10442</a> <span> [<a href="https://arxiv.org/pdf/2411.10442">pdf</a>, <a href="https://arxiv.org/format/2411.10442">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Enhancing the Reasoning Ability of Multimodal Large Language Models via Mixed Preference Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Wang%2C+W">Weiyun Wang</a>, <a href="/search/?searchtype=author&query=Chen%2C+Z">Zhe Chen</a>, <a href="/search/?searchtype=author&query=Wang%2C+W">Wenhai Wang</a>, <a href="/search/?searchtype=author&query=Cao%2C+Y">Yue Cao</a>, <a href="/search/?searchtype=author&query=Liu%2C+Y">Yangzhou Liu</a>, <a href="/search/?searchtype=author&query=Gao%2C+Z">Zhangwei Gao</a>, <a href="/search/?searchtype=author&query=Zhu%2C+J">Jinguo Zhu</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xizhou Zhu</a>, <a href="/search/?searchtype=author&query=Lu%2C+L">Lewei Lu</a>, <a href="/search/?searchtype=author&query=Qiao%2C+Y">Yu Qiao</a>, <a href="/search/?searchtype=author&query=Dai%2C+J">Jifeng Dai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10442v1-abstract-short" style="display: inline;"> Existing open-source multimodal large language models (MLLMs) generally follow a training process involving pre-training and supervised fine-tuning. However, these models suffer from distribution shifts, which limit their multimodal reasoning, particularly in the Chain-of-Thought (CoT) performance. 
To address this, we introduce a preference optimization (PO) process to enhance the multimodal reaso… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10442v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10442v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10442v1-abstract-full" style="display: none;"> Existing open-source multimodal large language models (MLLMs) generally follow a training process involving pre-training and supervised fine-tuning. However, these models suffer from distribution shifts, which limit their multimodal reasoning, particularly in the Chain-of-Thought (CoT) performance. To address this, we introduce a preference optimization (PO) process to enhance the multimodal reasoning capabilities of MLLMs. Specifically, (1) on the data side, we design an automated preference data construction pipeline to create MMPR, a high-quality, large-scale multimodal reasoning preference dataset, and (2) on the model side, we explore integrating PO with MLLMs, developing a simple yet effective method, termed Mixed Preference Optimization (MPO), which boosts multimodal CoT performance. Our approach demonstrates improved performance across multiple benchmarks, particularly in multimodal reasoning tasks. Notably, our model, InternVL2-8B-MPO, achieves an accuracy of 67.0 on MathVista, outperforming InternVL2-8B by 8.7 points and achieving performance comparable to the 10x larger InternVL2-76B. We hope this study could inspire further advancements in MLLMs. Code, data, and model shall be publicly released. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10442v1-abstract-full').style.display = 'none'; document.getElementById('2411.10442v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10219">arXiv:2411.10219</a> <span> [<a href="https://arxiv.org/pdf/2411.10219">pdf</a>, <a href="https://arxiv.org/format/2411.10219">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Constraints on the photon polarisation in $b \to s γ$ transitions using $B_s^0 \rightarrow φe^+e^-$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. 
Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. (1120 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10219v2-abstract-short" style="display: inline;"> An angular analysis of the $B_s^0 \rightarrow φe^+e^-$ decay is performed using the proton-proton collision dataset collected between 2011 and 2018 by the LHCb experiment, corresponding to an integrated luminosity of $9\,{\rm fb}^{-1}$ at centre-of-mass energies of 7, 8 and $13\,{\rm TeV}$. The analysis is performed in the very low dielectron invariant mass-squared region between $0.0009$ and… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10219v2-abstract-full').style.display = 'inline'; document.getElementById('2411.10219v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10219v2-abstract-full" style="display: none;"> An angular analysis of the $B_s^0 \rightarrow φe^+e^-$ decay is performed using the proton-proton collision dataset collected between 2011 and 2018 by the LHCb experiment, corresponding to an integrated luminosity of $9\,{\rm fb}^{-1}$ at centre-of-mass energies of 7, 8 and $13\,{\rm TeV}$. The analysis is performed in the very low dielectron invariant mass-squared region between $0.0009$ and $0.2615\,{\rm GeV}^2\!/c^4$. The longitudinal polarisation fraction of the $φ$ meson is measured to be less than $11.5\%$ at $90\%$ confidence level. The $A_{\mathrm{T}}^{\mathcal{R}e C\!P}$ observable, which is related to the lepton forward-backward asymmetry, is measured to be $0.116 \pm 0.155 \pm 0.006$, where the first uncertainty is statistical and the second systematic. The transverse asymmetries, $A_{\mathrm{T}}^{(2)}$ and $A_{\mathrm{T}}^{\mathcal{I}m C\!P}$, which are sensitive to the virtual photon polarisation, are found to be $-0.045 \pm 0.235 \pm 0.014$ and $0.002 \pm 0.247 \pm 0.016$, respectively. The results are consistent with Standard Model predictions. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10219v2-abstract-full').style.display = 'none'; document.getElementById('2411.10219v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 4 figures. All figures and tables, along with any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3433/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-030, CERN-EP-2024-276 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10060">arXiv:2411.10060</a> <span> [<a href="https://arxiv.org/pdf/2411.10060">pdf</a>, <a href="https://arxiv.org/format/2411.10060">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> CMATH: Cross-Modality Augmented Transformer with Hierarchical Variational Distillation for Multimodal Emotion Recognition in Conversation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaofei Zhu</a>, <a href="/search/?searchtype=author&query=Cheng%2C+J">Jiawei Cheng</a>, <a href="/search/?searchtype=author&query=Yang%2C+Z">Zhou Yang</a>, <a href="/search/?searchtype=author&query=Chen%2C+Z">Zhuo Chen</a>, <a href="/search/?searchtype=author&query=Wang%2C+Q">Qingyang Wang</a>, <a href="/search/?searchtype=author&query=Yao%2C+J">Jianfeng Yao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10060v1-abstract-short" style="display: inline;"> Multimodal emotion recognition in conversation (MER) aims to accurately identify emotions in conversational utterances by integrating multimodal information. Previous methods usually treat multimodal information as equal quality and employ symmetric architectures to conduct multimodal fusion. However, in reality, the quality of different modalities usually varies considerably, and utilizing a symm… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10060v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10060v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10060v1-abstract-full" style="display: none;"> Multimodal emotion recognition in conversation (MER) aims to accurately identify emotions in conversational utterances by integrating multimodal information. 
Previous methods usually treat multimodal information as equal quality and employ symmetric architectures to conduct multimodal fusion. However, in reality, the quality of different modalities usually varies considerably, and it is difficult for a symmetric architecture to accurately recognize conversational emotions when dealing with uneven modal information. Furthermore, fusing multi-modality information at a single granularity may fail to adequately integrate modal information, exacerbating the inaccuracy in emotion recognition. In this paper, we propose a novel Cross-Modality Augmented Transformer with Hierarchical Variational Distillation, called CMATH, which consists of two major components, i.e., Multimodal Interaction Fusion and Hierarchical Variational Distillation. The former is comprised of two submodules, Modality Reconstruction and the Cross-Modality Augmented Transformer (CMA-Transformer), where Modality Reconstruction focuses on obtaining a high-quality compressed representation of each modality, and the CMA-Transformer adopts an asymmetric fusion strategy which treats one modality as the central modality and takes the others as auxiliary modalities. The latter first designs a variational fusion network to fuse the fine-grained representations learned by the CMA-Transformer into a coarse-grained representation. Then, it introduces a hierarchical distillation framework to maintain the consistency between modality representations of different granularities. Experiments on the IEMOCAP and MELD datasets demonstrate that our proposed model outperforms previous state-of-the-art baselines. Implementation code is available at https://github.com/cjw-MER/CMATH. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10060v1-abstract-full').style.display = 'none'; document.getElementById('2411.10060v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
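<p> The asymmetric fusion strategy described above, in which one central modality attends over the auxiliary modalities, can be sketched with a single cross-attention layer. This is an illustrative PyTorch sketch, not the released CMATH code; all class names and tensor shapes are assumptions. </p> <pre><code>
# Illustrative asymmetric cross-modal fusion: the central modality queries the
# concatenated auxiliary modalities and is updated with a residual connection.
import torch
import torch.nn as nn

class AsymmetricCrossModalFusion(nn.Module):
    def __init__(self, dim: int = 256, heads: int = 4):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.norm = nn.LayerNorm(dim)

    def forward(self, central: torch.Tensor, auxiliaries: list):
        context = torch.cat(auxiliaries, dim=1)        # stack auxiliary tokens
        fused, _ = self.attn(central, context, context)
        return self.norm(central + fused)              # residual update of the central modality

fusion = AsymmetricCrossModalFusion()
text = torch.randn(2, 10, 256)                         # central modality (e.g., text)
audio = torch.randn(2, 10, 256)                        # auxiliary modality
video = torch.randn(2, 10, 256)                        # auxiliary modality
print(fusion(text, [audio, video]).shape)              # torch.Size([2, 10, 256])
</code></pre>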
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09907">arXiv:2411.09907</a> <span> [<a href="https://arxiv.org/pdf/2411.09907">pdf</a>, <a href="https://arxiv.org/format/2411.09907">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Strongly Correlated Electrons">cond-mat.str-el</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Superconductivity">cond-mat.supr-con</span> </div> </div> <p class="title is-5 mathjax"> Density-wave like behavior in a new Kagome material Ce$_{2}$Ru$_{3}$Si </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Wang%2C+J">Jinhua Wang</a>, <a href="/search/?searchtype=author&query=Fan%2C+S">Shengtai Fan</a>, <a href="/search/?searchtype=author&query=Li%2C+Y">Yiwen Li</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiyu Zhu</a>, <a href="/search/?searchtype=author&query=Wen%2C+H">Hai-hu Wen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09907v1-abstract-short" style="display: inline;"> Kagome materials with inherent geometric frustration can produce many interesting physical properties, such as flat bands, quantum spin liquid, chiral magnetism, superconductivity and density-wave orders. Sometimes, the localized 4$f$ electrons from Ce atoms, coupled with other conduction electrons, also give rise to flat bands near the Fermi level, resulting in the formation of heavy fe… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09907v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09907v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09907v1-abstract-full" style="display: none;"> Kagome materials with inherent geometric frustration can produce many interesting physical properties, such as flat bands, quantum spin liquid, chiral magnetism, superconductivity and density-wave orders. Sometimes, the localized 4$f$ electrons from Ce atoms, coupled with other conduction electrons, also give rise to flat bands near the Fermi level, resulting in the formation of heavy fermions. Thus, it is highly probable that a kagome material incorporating Ce will display nontrivial physical properties. In this study, we present a new kagome material belonging to the ternary Laves phase, Ce$_{2}$Ru$_{3}$Si, in which the kagome plane is formed by Ru atoms. Electrical transport and specific heat measurements reveal a density-wave-like transition. A Curie-Weiss behavior is observed in the low-temperature region. Meanwhile, we also find a relatively large specific-heat coefficient $γ_{n}(0)$. The calculated Wilson ratio $R_\mathrm{W}\propto{χ(0)/γ_{n}}$ is approximately 3.1, indicating a moderate electron correlation effect. Chemical doping of Ir at the Ru site rapidly suppresses this density-wave-like transition, while Mo doping leads to a gradual decrease in the transition temperature. 
Theoretical calculations indicate that both the Ce-4$f$ and Ru-4$d$ electronic bands cross the Fermi level, forming a Mexican-hat-shaped Fermi surface close to the Fermi energy, potentially accounting for the observed density-wave-like transition. Our findings provide a useful platform for investigating how hybridization between 4$f$ and 4$d$ electrons influences the electronic transport, and the relationship between the density-wave transition and the kagome structure. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09907v1-abstract-full').style.display = 'none'; document.getElementById('2411.09907v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 4 figures, 2 supplementary figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09421">arXiv:2411.09421</a> <span> [<a href="https://arxiv.org/pdf/2411.09421">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> A 2D van der Waals Material for Terahertz Emission with Giant Optical Rectification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Handa%2C+T">Taketo Handa</a>, <a href="/search/?searchtype=author&query=Huang%2C+C">Chun-Ying Huang</a>, <a href="/search/?searchtype=author&query=Li%2C+Y">Yiliu Li</a>, <a href="/search/?searchtype=author&query=Olsen%2C+N">Nicholas Olsen</a>, <a href="/search/?searchtype=author&query=Chica%2C+D+G">Daniel G. Chica</a>, <a href="/search/?searchtype=author&query=Xu%2C+D+D">David D. Xu</a>, <a href="/search/?searchtype=author&query=Sturm%2C+F">Felix Sturm</a>, <a href="/search/?searchtype=author&query=McIver%2C+J+W">James W. McIver</a>, <a href="/search/?searchtype=author&query=Roy%2C+X">Xavier Roy</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaoyang Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09421v1-abstract-short" style="display: inline;"> Exfoliation and stacking of two-dimensional (2D) van der Waals (vdW) crystals have created unprecedented opportunities in the discovery of quantum phases. 
A major obstacle to the advancement of this field is the limited spectroscopic access due to a mismatch in sample sizes (1 - 10 micrometer) and wavelengths (0.1 - 1 millimeter) of electromagnetic radiation relevant to their low-energy excitation… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09421v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09421v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09421v1-abstract-full" style="display: none;"> Exfoliation and stacking of two-dimensional (2D) van der Waals (vdW) crystals have created unprecedented opportunities in the discovery of quantum phases. A major obstacle to the advancement of this field is the limited spectroscopic access due to a mismatch in sample sizes (1 - 10 micrometer) and wavelengths (0.1 - 1 millimeter) of electromagnetic radiation relevant to their low-energy excitations. Here, we introduce a new member of the 2D vdW material family: a terahertz (THz) emitter. We show intense and broadband THz generation from the vdW ferroelectric semiconductor NbOI2 with optical rectification efficiency over one-order-of-magnitude higher than that of the current standard THz emitter, ZnTe. The NbOI2 THz emitter can be easily integrated into vdW heterostructures for on-chip near-field THz spectroscopy of a target vdW material/device. Our approach provides a general spectroscopic tool for the rapidly expanding field of 2D vdW materials and quantum matter. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09421v1-abstract-full').style.display = 'none'; document.getElementById('2411.09421v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 3 figures, 15 pages of Supplementary Information</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09345">arXiv:2411.09345</a> <span> [<a href="https://arxiv.org/pdf/2411.09345">pdf</a>, <a href="https://arxiv.org/format/2411.09345">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> DarkSHINE Baseline Design Report: Physics Prospects and Detector Technologies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Chen%2C+J">Jing Chen</a>, <a href="/search/?searchtype=author&query=Chen%2C+J">Ji-Yuan Chen</a>, <a href="/search/?searchtype=author&query=Chen%2C+J">Jun-Feng Chen</a>, <a href="/search/?searchtype=author&query=Chen%2C+X">Xiang Chen</a>, <a href="/search/?searchtype=author&query=Fu%2C+C">Chang-Bo Fu</a>, <a href="/search/?searchtype=author&query=Guo%2C+J">Jun Guo</a>, <a href="/search/?searchtype=author&query=Guo%2C+Y">Yi-Han Guo</a>, <a href="/search/?searchtype=author&query=Khaw%2C+K+S">Kim Siang Khaw</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jia-Lin Li</a>, <a href="/search/?searchtype=author&query=Li%2C+L">Liang Li</a>, <a href="/search/?searchtype=author&query=Li%2C+S">Shu Li</a>, <a href="/search/?searchtype=author&query=Lin%2C+Y">Yu-ming Lin</a>, <a href="/search/?searchtype=author&query=Liu%2C+D">Dan-Ning Liu</a>, <a href="/search/?searchtype=author&query=Liu%2C+K">Kang Liu</a>, <a href="/search/?searchtype=author&query=Liu%2C+K">Kun Liu</a>, <a href="/search/?searchtype=author&query=Liu%2C+Q">Qi-Bin Liu</a>, <a href="/search/?searchtype=author&query=Liu%2C+Z">Zhi Liu</a>, <a href="/search/?searchtype=author&query=Lu%2C+Z">Ze-Jia Lu</a>, <a href="/search/?searchtype=author&query=Lv%2C+M">Meng Lv</a>, <a href="/search/?searchtype=author&query=Song%2C+S">Si-Yuan Song</a>, <a href="/search/?searchtype=author&query=Sun%2C+T">Tong Sun</a>, <a href="/search/?searchtype=author&query=Tang%2C+J">Jian-Nan Tang</a>, <a href="/search/?searchtype=author&query=Wan%2C+W">Wei-Shi Wan</a>, <a href="/search/?searchtype=author&query=Wang%2C+D">Dong Wang</a>, <a href="/search/?searchtype=author&query=Wang%2C+X">Xiao-Long Wang</a> , et al. (17 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09345v1-abstract-short" style="display: inline;"> DarkSHINE is a newly proposed fixed-target experiment initiative to search for the invisible decay of Dark Photon via missing energy/momentum signatures, based on the high repetition rate electron beam to be deployed/delivered by the Shanghai High repetition rate XFEL and Extreme light facility (SHINE). 
This report elaborates on the baseline design of the DarkSHINE experiment by introducing the physics g… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09345v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09345v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09345v1-abstract-full" style="display: none;"> DarkSHINE is a newly proposed fixed-target experiment initiative to search for the invisible decay of Dark Photon via missing energy/momentum signatures, based on the high repetition rate electron beam to be deployed/delivered by the Shanghai High repetition rate XFEL and Extreme light facility (SHINE). This report elaborates on the baseline design of the DarkSHINE experiment by introducing the physics goals, experimental setup, technical designs of each sub-detector system, signal and background modelling, expected search sensitivities, and future prospects, which mark an important step towards further prototyping and technical demonstrations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09345v1-abstract-full').style.display = 'none'; document.getElementById('2411.09345v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09343">arXiv:2411.09343</a> <span> [<a href="https://arxiv.org/pdf/2411.09343">pdf</a>, <a href="https://arxiv.org/format/2411.09343">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Nuclear Experiment">nucl-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of $φ(1020)$ meson production in fixed-target $\textit{p}$Ne collisions at $\sqrt{s_{NN}}$ = 68.5 GeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. 
Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. (1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09343v1-abstract-short" style="display: inline;"> The first measurement of $φ(1020)$ meson production in fixed-target $p$Ne collisions at $\sqrt{s_{NN}}=68.5$ GeV is presented. The $φ(1020)$ mesons are reconstructed in their $K^{+}K^{-}$ decay in a data sample consisting of proton collisions on neon nuclei at rest, corresponding to an integrated luminosity of $21.7 \pm 1.4$ nb$^{-1}$, collected by the LHCb detector at CERN. The $φ(1020)$ producti… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09343v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09343v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09343v1-abstract-full" style="display: none;"> The first measurement of $φ(1020)$ meson production in fixed-target $p$Ne collisions at $\sqrt{s_{NN}}=68.5$ GeV is presented. The $φ(1020)$ mesons are reconstructed in their $K^{+}K^{-}$ decay in a data sample consisting of proton collisions on neon nuclei at rest, corresponding to an integrated luminosity of $21.7 \pm 1.4$ nb$^{-1}$, collected by the LHCb detector at CERN. The $φ(1020)$ production cross-section in the centre-of-mass rapidity range of $-1.8<y^*<0$ and transverse momentum range of $800<p_{T}<6500$ MeV/c is found to be $σ=182.7\pm2.7~\text{(stat.)}\pm14.1~\text{(syst)}~μ$b/nucleon. A double-differential measurement of the cross-section is also provided in four regions of rapidity and six regions of transverse momentum of the $φ(1020)$ meson and compared with the predictions from Pythia and EPOS4, which are found to underestimate the experimental values. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09343v1-abstract-full').style.display = 'none'; document.getElementById('2411.09343v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3673/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-036, CERN-EP-2024-274 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09307">arXiv:2411.09307</a> <span> [<a href="https://arxiv.org/pdf/2411.09307">pdf</a>, <a href="https://arxiv.org/ps/2411.09307">ps</a>, <a href="https://arxiv.org/format/2411.09307">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Model-Based Event-Triggered Implementation of Hybrid Controllers Using Finite-Time Convergent Observers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhu%2C+X">Xuanzhi Zhu</a>, <a href="/search/?searchtype=author&query=Casau%2C+P">Pedro Casau</a>, <a href="/search/?searchtype=author&query=Silvestre%2C+C">Carlos Silvestre</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09307v1-abstract-short" style="display: inline;"> In this paper, we explore the conditions for asymptotic stability of the hybrid closed-loop system resulting from the interconnection of a nonlinear plant, an intelligent sensor that generates finite-time convergent estimates of the plant state, and a controller node that receives opportunistic samples from the sensor node when certain model-based event-triggering conditions are met. The proposed… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09307v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09307v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09307v1-abstract-full" style="display: none;"> In this paper, we explore the conditions for asymptotic stability of the hybrid closed-loop system resulting from the interconnection of a nonlinear plant, an intelligent sensor that generates finite-time convergent estimates of the plant state, and a controller node that receives opportunistic samples from the sensor node when certain model-based event-triggering conditions are met. The proposed method is endowed with a degree of separation, in the sense that the controller design is independent of the sensor design. This is achieved under mild regularity conditions imposed on the hybrid closed-loop system and the existence of persistently flowing solutions. We demonstrate the versatility of the method by implementing it on: 1) a sampled-data controller for regulation of linear plants; 2) a synergistic controller for attitude stabilization of rigid bodies. The effectiveness of these novel controllers is demonstrated through numerical simulations. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09307v1-abstract-full').style.display = 'none'; document.getElementById('2411.09307v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09301">arXiv:2411.09301</a> <span> [<a href="https://arxiv.org/pdf/2411.09301">pdf</a>, <a href="https://arxiv.org/format/2411.09301">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LHRS-Bot-Nova: Improved Multimodal Large Language Model for Remote Sensing Vision-Language Interpretation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Li%2C+Z">Zhenshi Li</a>, <a href="/search/?searchtype=author&query=Muhtar%2C+D">Dilxat Muhtar</a>, <a href="/search/?searchtype=author&query=Gu%2C+F">Feng Gu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+X">Xueliang Zhang</a>, <a href="/search/?searchtype=author&query=Xiao%2C+P">Pengfeng Xiao</a>, <a href="/search/?searchtype=author&query=He%2C+G">Guangjun He</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaoxiang Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09301v1-abstract-short" style="display: inline;"> Automatically and rapidly understanding Earth's surface is fundamental to our grasp of the living environment and informed decision-making. This underscores the need for a unified system with comprehensive capabilities in analyzing Earth's surface to address a wide range of human needs. The emergence of multimodal large language models (MLLMs) has great potential in boosting the efficiency and con… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09301v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09301v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09301v1-abstract-full" style="display: none;"> Automatically and rapidly understanding Earth's surface is fundamental to our grasp of the living environment and informed decision-making. This underscores the need for a unified system with comprehensive capabilities in analyzing Earth's surface to address a wide range of human needs. The emergence of multimodal large language models (MLLMs) has great potential in boosting the efficiency and convenience of intelligent Earth observation. These models can engage in human-like conversations, serve as unified platforms for understanding images, follow diverse instructions, and provide insightful feedbacks. In this study, we introduce LHRS-Bot-Nova, an MLLM specialized in understanding remote sensing (RS) images, designed to expertly perform a wide range of RS understanding tasks aligned with human instructions. 
LHRS-Bot-Nova features an enhanced vision encoder and a novel bridge layer, enabling efficient visual compression and better language-vision alignment. To further enhance RS-oriented vision-language alignment, we propose a large-scale RS image-caption dataset, generated through feature-guided image recaptioning. Additionally, we introduce an instruction dataset specifically designed to improve spatial recognition abilities. Extensive experiments demonstrate the superior performance of LHRS-Bot-Nova across various RS image understanding tasks. We also evaluate the performance of different MLLMs on complex RS perception and instruction following using a complicated multiple-choice question evaluation benchmark, providing a reliable guide for future model selection and improvement. Data, code, and models will be available at https://github.com/NJU-LHRS/LHRS-Bot. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09301v1-abstract-full').style.display = 'none'; document.getElementById('2411.09301v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08680">arXiv:2411.08680</a> <span> [<a href="https://arxiv.org/pdf/2411.08680">pdf</a>, <a href="https://arxiv.org/format/2411.08680">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Integrated Precoder and Trajectory Design for MIMO UAV-Assisted Relay System With Finite-Alphabet Inputs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Di%2C+H">Haoyang Di</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaodong Zhu</a>, <a href="/search/?searchtype=author&query=Shao%2C+Y">Yulin Shao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08680v1-abstract-short" style="display: inline;"> Unmanned aerial vehicles (UAVs) are gaining widespread use in wireless relay systems due to their exceptional flexibility and cost-effectiveness. This paper focuses on the integrated design of UAV trajectories and the precoders at both the transmitter and UAV in a UAV-assisted relay communication system, accounting for transmit power constraints and UAV flight limitations. Unlike previous works th… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08680v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08680v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08680v1-abstract-full" style="display: none;"> Unmanned aerial vehicles (UAVs) are gaining widespread use in wireless relay systems due to their exceptional flexibility and cost-effectiveness. 
This paper focuses on the integrated design of UAV trajectories and the precoders at both the transmitter and UAV in a UAV-assisted relay communication system, accounting for transmit power constraints and UAV flight limitations. Unlike previous works that primarily address multiple-input single-output (MISO) systems with Gaussian inputs, we investigate a more realistic scenario involving multiple-input multiple-output (MIMO) systems with finite-alphabet inputs. To tackle the challenging and inherently non-convex problem, we propose an efficient solution algorithm that leverages successive convex approximation and alternating optimization techniques. Simulation results validate the effectiveness of the proposed algorithm, demonstrating its capability to optimize system performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08680v1-abstract-full').style.display = 'none'; document.getElementById('2411.08680v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08413">arXiv:2411.08413</a> <span> [<a href="https://arxiv.org/pdf/2411.08413">pdf</a>, <a href="https://arxiv.org/format/2411.08413">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Inference-Aware State Reconstruction for Industrial Metaverse under Synchronous/Asynchronous Short-Packet Transmission </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Xiong%2C+Q">Qinqin Xiong</a>, <a href="/search/?searchtype=author&query=Cao%2C+J">Jie Cao</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xu Zhu</a>, <a href="/search/?searchtype=author&query=Jiang%2C+Y">Yufei Jiang</a>, <a href="/search/?searchtype=author&query=Pappas%2C+N">Nikolaos Pappas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08413v1-abstract-short" style="display: inline;"> We consider a real-time state reconstruction system for industrial metaverse. The time-varying physical process states in real space are captured by multiple sensors via wireless links, and then reconstructed in virtual space. In this paper, we use the spatial-temporal correlation of the sensor data of interest to infer the real-time data of the target sensor to reduce the mean squared error (MSE)… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08413v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08413v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08413v1-abstract-full" style="display: none;"> We consider a real-time state reconstruction system for industrial metaverse. The time-varying physical process states in real space are captured by multiple sensors via wireless links, and then reconstructed in virtual space. 
In this paper, we use the spatial-temporal correlation of the sensor data of interest to infer the real-time data of the target sensor to reduce the mean squared error (MSE) of reconstruction for industrial metaverse under short-packet transmission (SPT). Both synchronous and asynchronous transmission modes for multiple sensors are considered. It is proved that the average MSE of reconstruction and the average block error probability (BLEP) are positively correlated under the inference-with-synchronous-transmission scheme, and can be negatively correlated in some conditions under the inference-with-asynchronous-transmission scheme. Also, it is proved that the average MSE of reconstruction with inference can be significantly lower than that without inference, even under weak mean squared spatial correlation (MSSC). In addition, closed-form MSSC thresholds are derived for the superiority regions of the inference-with-synchronous-transmission and inference-with-asynchronous-transmission schemes, respectively. The blocklength and the time shift of asynchronous transmission are adapted to minimize the average MSE of reconstruction. Simulation results show that the two schemes significantly outperform the no-inference case, with an average MSE reduction of more than 50%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08413v1-abstract-full').style.display = 'none'; document.getElementById('2411.08413v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07870">arXiv:2411.07870</a> <span> [<a href="https://arxiv.org/pdf/2411.07870">pdf</a>, <a href="https://arxiv.org/format/2411.07870">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Trustful LLMs: Customizing and Grounding Text Generation with Knowledge Bases and Dual Decoders </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaofeng Zhu</a>, <a href="/search/?searchtype=author&query=Mandivarapu%2C+J+K">Jaya Krishna Mandivarapu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07870v2-abstract-short" style="display: inline;"> Although people are impressed by the content generation skills of large language models, the use of LLMs, such as ChatGPT, is limited by the domain grounding of the content. The correctness and groundedness of the generated content need to be based on a verified context, such as results from Retrieval-Augmented Generation (RAG). 
One important issue when adapting LLMs to a customized domain is that… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07870v2-abstract-full').style.display = 'inline'; document.getElementById('2411.07870v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07870v2-abstract-full" style="display: none;"> Although people are impressed by the content generation skills of large language models, the use of LLMs, such as ChatGPT, is limited by the domain grounding of the content. The correctness and groundedness of the generated content need to be based on a verified context, such as results from Retrieval-Augmented Generation (RAG). One important issue when adapting LLMs to a customized domain is that the generated responses are often incomplete, or the additions are not verified and may even be hallucinated. Prior studies on hallucination detection have focused on evaluation metrics, which are not easily adaptable to dynamic domains and can be vulnerable to attacks like jail-breaking. In this work, we propose 1) a post-processing algorithm that leverages knowledge triplets in RAG context to correct hallucinations and 2) a dual-decoder model that fuses RAG context to guide the generation process. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07870v2-abstract-full').style.display = 'none'; document.getElementById('2411.07870v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> EMNLP CustomNLP4U 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07730">arXiv:2411.07730</a> <span> [<a href="https://arxiv.org/pdf/2411.07730">pdf</a>, <a href="https://arxiv.org/ps/2411.07730">ps</a>, <a href="https://arxiv.org/format/2411.07730">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of the light scalar $a_{0}(980)$ through the decay $D^{0} \to a_{0}(980)^-e^{+} ν_{e}$ with $a_{0}(980)^- \to ηπ^-$ </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&query=Bakina%2C+O">O. 
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07730">arXiv:2411.07730</a> <span> [<a href="https://arxiv.org/pdf/2411.07730">pdf</a>, <a href="https://arxiv.org/ps/2411.07730">ps</a>, <a href="https://arxiv.org/format/2411.07730">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div>
<p class="title is-5 mathjax"> Study of the light scalar $a_{0}(980)$ through the decay $D^{0} \to a_{0}(980)^-e^{+} ν_{e}$ with $a_{0}(980)^- \to ηπ^-$ </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&query=Briere%2C+R+A">R. A. Briere</a> , et al. (649 additional authors not shown) </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2411.07730v1-abstract-full"> Using 7.93 ${\rm fb^{-1}}$ of $e^+e^-$ collision data collected at a center-of-mass energy of 3.773 ${\rm GeV}$ with the BESIII detector, we present an analysis of the decay $D^{0} \to ηπ^- e^+ ν_{e}$. The branching fraction of the decay $D^{0} \to a_{0}(980)^{-} e^+ ν_{e}$ with $a_{0}(980)^{-} \to ηπ^{-}$ is measured to be $(0.86\pm0.17_{\text{stat}}\pm0.05_{\text{syst}})\times 10^{-4}$. The decay dynamics of this process are studied with a single-pole parameterization of the hadronic form factor and the Flatté formula describing the $a_0(980)$ line shape in the differential decay rate. The product of the form factor $f^{ a_0}_{+}(0)$ and the Cabibbo-Kobayashi-Maskawa matrix element $|V_{cd}|$ is determined for the first time with the result $f^{ a_0}_+(0)|V_{cd}|=0.126\pm0.013_{\rm stat}\pm0.003_{\rm syst}$. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
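<p class="is-size-7 mathjax">For reference, a single-pole form factor of the kind the abstract invokes has the standard shape $f_+^{a_0}(q^2)=\dfrac{f_+^{a_0}(0)}{1-q^2/M_{\mathrm{pole}}^2}$, so the quoted product $f_+^{a_0}(0)|V_{cd}|$ is the normalization at $q^2=0$ multiplied by the CKM element; the choice of $M_{\mathrm{pole}}$ is a detail of the paper's fit not given in the abstract.</p>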
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07140">arXiv:2411.07140</a> <span> [<a href="https://arxiv.org/pdf/2411.07140">pdf</a>, <a href="https://arxiv.org/format/2411.07140">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div>
<p class="title is-5 mathjax"> Chinese SimpleQA: A Chinese Factuality Evaluation for Large Language Models </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=He%2C+Y">Yancheng He</a>, <a href="/search/?searchtype=author&query=Li%2C+S">Shilong Li</a>, <a href="/search/?searchtype=author&query=Liu%2C+J">Jiaheng Liu</a>, <a href="/search/?searchtype=author&query=Tan%2C+Y">Yingshui Tan</a>, <a href="/search/?searchtype=author&query=Wang%2C+W">Weixun Wang</a>, <a href="/search/?searchtype=author&query=Huang%2C+H">Hui Huang</a>, <a href="/search/?searchtype=author&query=Bu%2C+X">Xingyuan Bu</a>, <a href="/search/?searchtype=author&query=Guo%2C+H">Hangyu Guo</a>, <a href="/search/?searchtype=author&query=Hu%2C+C">Chengwei Hu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+B">Boren Zheng</a>, <a href="/search/?searchtype=author&query=Lin%2C+Z">Zhuoran Lin</a>, <a href="/search/?searchtype=author&query=Liu%2C+X">Xuepeng Liu</a>, <a href="/search/?searchtype=author&query=Sun%2C+D">Dekai Sun</a>, <a href="/search/?searchtype=author&query=Lin%2C+S">Shirong Lin</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhicheng Zheng</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaoyong Zhu</a>, <a href="/search/?searchtype=author&query=Su%2C+W">Wenbo Su</a>, <a href="/search/?searchtype=author&query=Zheng%2C+B">Bo Zheng</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2411.07140v2-abstract-full"> New LLM evaluation benchmarks are important to align with the rapid development of Large Language Models (LLMs). In this work, we present Chinese SimpleQA, the first comprehensive Chinese benchmark to evaluate the factuality ability of language models to answer short questions; it has five main properties (Chinese, diverse, high-quality, static, and easy to evaluate). Specifically, first, we focus on the Chinese language over 6 major topics with 99 diverse subtopics. Second, we conduct a comprehensive quality control process to achieve high-quality questions and answers, where the reference answers are static and cannot be changed over time. Third, following SimpleQA, the questions and answers are very short, and the grading process is easy to evaluate via the OpenAI API. Based on Chinese SimpleQA, we perform a comprehensive evaluation of the factuality abilities of existing LLMs. Finally, we hope that Chinese SimpleQA can guide developers to better understand the Chinese factuality abilities of their models and facilitate the growth of foundation models. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
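<p class="is-size-7">An "easy to evaluate via the OpenAI API" grading loop can be sketched as follows (the prompt, labels, and model name are our illustrative assumptions, not the benchmark's official grader):</p>
<pre><code class="language-python">
# Minimal LLM-as-judge grading of short factual answers.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def grade(question: str, reference: str, predicted: str) -> str:
    prompt = (f"Question: {question}\n"
              f"Reference answer: {reference}\n"
              f"Model answer: {predicted}\n"
              "Reply with one word: CORRECT, INCORRECT, or NOT_ATTEMPTED.")
    resp = client.chat.completions.create(
        model="gpt-4o-mini",  # any capable judge model
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
    )
    return resp.choices[0].message.content.strip()
</code></pre>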
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06794">arXiv:2411.06794</a> <span> [<a href="https://arxiv.org/pdf/2411.06794">pdf</a>, <a href="https://arxiv.org/format/2411.06794">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Mesoscale and Nanoscale Physics">cond-mat.mes-hall</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistical Mechanics">cond-mat.stat-mech</span> </div> </div>
<p class="title is-5 mathjax"> Emergence of steady quantum transport in a superconducting processor </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhang%2C+P">Pengfei Zhang</a>, <a href="/search/?searchtype=author&query=Gao%2C+Y">Yu Gao</a>, <a href="/search/?searchtype=author&query=Xu%2C+X">Xiansong Xu</a>, <a href="/search/?searchtype=author&query=Wang%2C+N">Ning Wang</a>, <a href="/search/?searchtype=author&query=Dong%2C+H">Hang Dong</a>, <a href="/search/?searchtype=author&query=Guo%2C+C">Chu Guo</a>, <a href="/search/?searchtype=author&query=Deng%2C+J">Jinfeng Deng</a>, <a href="/search/?searchtype=author&query=Zhang%2C+X">Xu Zhang</a>, <a href="/search/?searchtype=author&query=Chen%2C+J">Jiachen Chen</a>, <a href="/search/?searchtype=author&query=Xu%2C+S">Shibo Xu</a>, <a href="/search/?searchtype=author&query=Wang%2C+K">Ke Wang</a>, <a href="/search/?searchtype=author&query=Wu%2C+Y">Yaozu Wu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+C">Chuanyu Zhang</a>, <a href="/search/?searchtype=author&query=Jin%2C+F">Feitong Jin</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xuhao Zhu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+A">Aosai Zhang</a>, <a href="/search/?searchtype=author&query=Zou%2C+Y">Yiren Zou</a>, <a href="/search/?searchtype=author&query=Tan%2C+Z">Ziqi Tan</a>, <a href="/search/?searchtype=author&query=Cui%2C+Z">Zhengyi Cui</a>, <a href="/search/?searchtype=author&query=Zhu%2C+Z">Zitian Zhu</a>, <a href="/search/?searchtype=author&query=Shen%2C+F">Fanhao Shen</a>, <a href="/search/?searchtype=author&query=Li%2C+T">Tingting Li</a>, <a href="/search/?searchtype=author&query=Zhong%2C+J">Jiarun Zhong</a>, <a href="/search/?searchtype=author&query=Bao%2C+Z">Zehang Bao</a>,
href="/search/?searchtype=author&query=Zhao%2C+L">Liangtian Zhao</a> , et al. (7 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06794v1-abstract-short" style="display: inline;"> Non-equilibrium quantum transport is crucial to technological advances ranging from nanoelectronics to thermal management. In essence, it deals with the coherent transfer of energy and (quasi-)particles through quantum channels between thermodynamic baths. A complete understanding of quantum transport thus requires the ability to simulate and probe macroscopic and microscopic physics on equal foot… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06794v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06794v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06794v1-abstract-full" style="display: none;"> Non-equilibrium quantum transport is crucial to technological advances ranging from nanoelectronics to thermal management. In essence, it deals with the coherent transfer of energy and (quasi-)particles through quantum channels between thermodynamic baths. A complete understanding of quantum transport thus requires the ability to simulate and probe macroscopic and microscopic physics on equal footing. Using a superconducting quantum processor, we demonstrate the emergence of non-equilibrium steady quantum transport by emulating the baths with qubit ladders and realising steady particle currents between the baths. We experimentally show that the currents are independent of the microscopic details of bath initialisation, and their temporal fluctuations decrease rapidly with the size of the baths, emulating those predicted by thermodynamic baths. The above characteristics are experimental evidence of pure-state statistical mechanics and prethermalisation in non-equilibrium many-body quantum systems. Furthermore, by utilising precise controls and measurements with single-site resolution, we demonstrate the capability to tune steady currents by manipulating the macroscopic properties of the baths, including filling and spectral properties. Our investigation paves the way for a new generation of experimental exploration of non-equilibrium quantum transport in strongly correlated quantum matter. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06794v1-abstract-full').style.display = 'none'; document.getElementById('2411.06794v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05669">arXiv:2411.05669</a> <span> [<a href="https://arxiv.org/pdf/2411.05669">pdf</a>, <a href="https://arxiv.org/format/2411.05669">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Nuclear Experiment">nucl-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of the $蠄(2S)$ to $J/蠄$ cross-section ratio as a function of centrality in PbPb collisions at $\sqrt{s_{\text{NN}}}$ = 5.02 TeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. (1128 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05669v1-abstract-short" style="display: inline;"> The dissociation of quarkonium states with different binding energies produced in heavy-ion collisions is a powerful probe for investigating the formation and properties of the quark-gluon plasma. 
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05172">arXiv:2411.05172</a> <span> [<a href="https://arxiv.org/pdf/2411.05172">pdf</a>, <a href="https://arxiv.org/format/2411.05172">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div>
<p class="title is-5 mathjax"> ImpScore: A Learnable Metric For Quantifying The Implicitness Level of Language </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Wang%2C+Y">Yuxin Wang</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaomeng Zhu</a>, <a href="/search/?searchtype=author&query=Lyu%2C+W">Weimin Lyu</a>, <a href="/search/?searchtype=author&query=Hassanpour%2C+S">Saeed Hassanpour</a>, <a href="/search/?searchtype=author&query=Vosoughi%2C+S">Soroush Vosoughi</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2411.05172v1-abstract-full"> Handling implicit language is essential for natural language processing systems to achieve precise text understanding and facilitate natural interactions with users.
Despite its importance, the absence of a robust metric for accurately measuring the implicitness of language significantly constrains the depth of analysis possible in evaluating models' comprehension capabilities. This paper addresses this gap by developing a scalar metric that quantifies the implicitness level of language without relying on external references. Drawing on principles from traditional linguistics, we define "implicitness" as the divergence between semantic meaning and pragmatic interpretation. To operationalize this definition, we introduce ImpScore, a novel, reference-free metric formulated through an interpretable regression model. This model is trained using pairwise contrastive learning on a specially curated dataset comprising $112,580$ (implicit sentence, explicit sentence) pairs. We validate ImpScore through a user study that compares its assessments with human evaluations on out-of-distribution data, demonstrating its accuracy and strong correlation with human judgments. Additionally, we apply ImpScore to hate speech detection datasets, illustrating its utility and highlighting significant limitations in current large language models' ability to understand highly implicit content. The metric model and its training data are available at https://github.com/audreycs/ImpScore. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
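<p class="is-size-7">The pairwise-contrastive training signal can be sketched in a few lines (our toy rendering with stand-in embeddings and margin, not ImpScore's actual architecture; see the linked repository for that):</p>
<pre><code class="language-python">
# Pairwise margin loss: each implicit sentence should out-score its
# explicit counterpart under a learned scalar scorer.
import torch
import torch.nn as nn

class Scorer(nn.Module):
    def __init__(self, dim: int = 384):
        super().__init__()
        self.head = nn.Sequential(nn.Linear(dim, 64), nn.ReLU(), nn.Linear(64, 1))

    def forward(self, emb: torch.Tensor) -> torch.Tensor:
        return self.head(emb).squeeze(-1)  # (batch,) implicitness scores

def pairwise_loss(s_implicit, s_explicit, margin: float = 1.0):
    return torch.relu(margin - (s_implicit - s_explicit)).mean()

scorer = Scorer()
imp, expl = torch.randn(8, 384), torch.randn(8, 384)  # stand-in embeddings
loss = pairwise_loss(scorer(imp), scorer(expl))
loss.backward()
</code></pre>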
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04469">arXiv:2411.04469</a> <span> [<a href="https://arxiv.org/pdf/2411.04469">pdf</a>, <a href="https://arxiv.org/format/2411.04469">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div>
<p class="title is-5 mathjax"> FreeCap: Hybrid Calibration-Free Motion Capture in Open Environments </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Xue%2C+A">Aoru Xue</a>, <a href="/search/?searchtype=author&query=Ren%2C+Y">Yiming Ren</a>, <a href="/search/?searchtype=author&query=Song%2C+Z">Zining Song</a>, <a href="/search/?searchtype=author&query=Ye%2C+M">Mao Ye</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xinge Zhu</a>, <a href="/search/?searchtype=author&query=Ma%2C+Y">Yuexin Ma</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2411.04469v1-abstract-full"> We propose a novel hybrid calibration-free method, FreeCap, to accurately capture global multi-person motions in open environments. Our system combines a single LiDAR with expandable moving cameras, allowing for flexible and precise motion estimation in a unified world coordinate frame. In particular, we introduce a local-to-global pose-aware cross-sensor human-matching module that predicts the alignment among the sensors, even in the absence of calibration. Additionally, our coarse-to-fine sensor-expandable pose optimizer further refines the 3D human key points and the alignments; it can also incorporate additional cameras to enhance accuracy. Extensive experiments on the Human-M3 and FreeMotion datasets demonstrate that our method significantly outperforms state-of-the-art single-modal methods, offering an expandable and efficient solution for multi-person motion capture across various applications. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04428">arXiv:2411.04428</a> <span> [<a href="https://arxiv.org/pdf/2411.04428">pdf</a>, <a href="https://arxiv.org/format/2411.04428">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div>
<p class="title is-5 mathjax"> DexH2R: Task-oriented Dexterous Manipulation from Human to Robots </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhao%2C+S">Shuqi Zhao</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xinghao Zhu</a>, <a href="/search/?searchtype=author&query=Chen%2C+Y">Yuxin Chen</a>, <a href="/search/?searchtype=author&query=Li%2C+C">Chenran Li</a>, <a href="/search/?searchtype=author&query=Zhang%2C+X">Xiang Zhang</a>, <a href="/search/?searchtype=author&query=Ding%2C+M">Mingyu Ding</a>, <a href="/search/?searchtype=author&query=Tomizuka%2C+M">Masayoshi Tomizuka</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2411.04428v1-abstract-full"> Dexterous manipulation is a critical aspect of human capability, enabling interaction with a wide variety of objects. Recent advancements in learning from human demonstrations and teleoperation have enabled progress for robots in this ability. However, these approaches either require complex data collection, such as costly human effort for eye-robot contact, or suffer from poor generalization when faced with novel scenarios. To solve both challenges, we propose a framework, DexH2R, that combines human hand motion retargeting with a task-oriented residual action policy, improving task performance by bridging the embodiment gap between human and robotic dexterous hands. Specifically, DexH2R learns the residual policy directly from retargeted primitive actions and task-oriented rewards, eliminating the need for labor-intensive teleoperation systems. Moreover, we incorporate test-time guidance for novel scenarios by taking in desired trajectories of human hands and objects, allowing the dexterous hand to acquire new skills with high generalizability. Extensive experiments in both simulation and real-world environments demonstrate the effectiveness of our work, outperforming the prior state of the art by 40% across various settings. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
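<p class="is-size-7">The action composition described above follows a simple pattern (a sketch under our assumptions; the retargeting map and policy below are placeholders, not DexH2R's components):</p>
<pre><code class="language-python">
# Residual policy on top of retargeted primitive actions.
import numpy as np

def retarget(human_hand_pose: np.ndarray) -> np.ndarray:
    """Stand-in kinematic retargeting from human to robot joint space."""
    return 0.8 * human_hand_pose  # placeholder linear map

def act(obs: np.ndarray, human_hand_pose: np.ndarray, residual_policy) -> np.ndarray:
    base = retarget(human_hand_pose)          # primitive action
    return base + residual_policy(obs, base)  # learned task-oriented correction

# Before training, e.g.: residual_policy = lambda obs, base: np.zeros_like(base)
</code></pre>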
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04428v1-abstract-full').style.display = 'none'; document.getElementById('2411.04428v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04395">arXiv:2411.04395</a> <span> [<a href="https://arxiv.org/pdf/2411.04395">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> Integrated electro-optic digital-to-analog link for efficient computing and arbitrary waveform generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Song%2C+Y">Yunxiang Song</a>, <a href="/search/?searchtype=author&query=Hu%2C+Y">Yaowen Hu</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xinrui Zhu</a>, <a href="/search/?searchtype=author&query=Powell%2C+K">Keith Powell</a>, <a href="/search/?searchtype=author&query=Magalh%C3%A3es%2C+L">Let铆cia Magalh茫es</a>, <a href="/search/?searchtype=author&query=Ye%2C+F">Fan Ye</a>, <a href="/search/?searchtype=author&query=Warner%2C+H">Hana Warner</a>, <a href="/search/?searchtype=author&query=Lu%2C+S">Shengyuan Lu</a>, <a href="/search/?searchtype=author&query=Li%2C+X">Xudong Li</a>, <a href="/search/?searchtype=author&query=Renaud%2C+D">Dylan Renaud</a>, <a href="/search/?searchtype=author&query=Lippok%2C+N">Norman Lippok</a>, <a href="/search/?searchtype=author&query=Zhu%2C+D">Di Zhu</a>, <a href="/search/?searchtype=author&query=Vakoc%2C+B">Benjamin Vakoc</a>, <a href="/search/?searchtype=author&query=Zhang%2C+M">Mian Zhang</a>, <a href="/search/?searchtype=author&query=Sinclair%2C+N">Neil Sinclair</a>, <a href="/search/?searchtype=author&query=Lon%C4%8Dar%2C+M">Marko Lon膷ar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04395v1-abstract-short" style="display: inline;"> The rapid growth in artificial intelligence and modern communication systems demands innovative solutions for increased computational power and advanced signaling capabilities. Integrated photonics, leveraging the analog nature of electromagnetic waves at the chip scale, offers a promising complement to approaches based on digital electronics. To fully unlock their potential as analog processors,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04395v1-abstract-full').style.display = 'inline'; document.getElementById('2411.04395v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04395v1-abstract-full" style="display: none;"> The rapid growth in artificial intelligence and modern communication systems demands innovative solutions for increased computational power and advanced signaling capabilities. Integrated photonics, leveraging the analog nature of electromagnetic waves at the chip scale, offers a promising complement to approaches based on digital electronics. 
To fully unlock their potential as analog processors, establishing a common technological base between conventional digital electronic systems and analog photonics is imperative to building next-generation computing and communications hardware. However, the absence of an efficient interface has critically challenged comprehensive demonstrations of analog advantage thus far, with scalability, speed, and energy consumption as the primary bottlenecks. Here, we address this challenge and demonstrate a general electro-optic digital-to-analog link (EO-DiAL) enabled by foundry-based lithium niobate nanophotonics. Using purely digital inputs, we achieve on-demand generation of (i) optical and (ii) electronic waveforms at information rates up to 186 Gbit/s. The former addresses the digital-to-analog electro-optic conversion challenge in photonic computing, showcasing high-fidelity MNIST encoding while consuming 0.058 pJ/bit. The latter enables a pulse-shaping-free microwave arbitrary waveform generation method with ultrabroadband tunable delay and gain. Our results pave the way for efficient and compact digital-to-analog conversion paradigms enabled by integrated photonics and underscore the transformative impact analog photonic hardware may have on various applications, such as computing, optical interconnects, and high-speed ranging. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
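<p class="is-size-7 mathjax">For scale, the quoted figures imply a back-of-envelope conversion power of $186\times10^{9}\,\mathrm{bit/s}\times0.058\,\mathrm{pJ/bit}\approx10.8\,\mathrm{mW}$ at the peak rate (our arithmetic, ignoring laser and driver overheads).</p>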
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03399">arXiv:2411.03399</a> <span> [<a href="https://arxiv.org/pdf/2411.03399">pdf</a>, <a href="https://arxiv.org/format/2411.03399">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div>
<p class="title is-5 mathjax"> Study of $D_{s1}(2460)^{+}\to D_{s}^{+}π^{+}π^{-}$ in $B\to {\bar{D}}^{(*)}D_{s}^{+}π^{+}π^{-}$ decays </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. (1124 additional authors not shown) </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2411.03399v1-abstract-full"> An amplitude analysis of the $D_{s1}(2460)^+\to D_{s}^{+}π^{+}π^{-}$ transition is performed simultaneously in $B^{0}\to D^{-}D_{s}^{+}π^{+}π^{-}$, $B^{+}\to{\bar{D}}^{0} D_{s}^{+}π^{+}π^{-}$, and $B^{0}\to D^{*-}D_{s}^{+}π^{+}π^{-}$ decays. The study is based on a data sample of proton-proton collisions recorded with the LHCb detector at centre-of-mass energies of $\sqrt{s}=7,8,$ and $13\,$TeV, corresponding to a total integrated luminosity of $9\,\rm{fb}^{-1}$. A clear double-peak structure is observed in the $m(π^{+}π^{-})$ spectrum of the $D_{s1}(2460)^{+}\to D_{s}^{+}π^{+}π^{-}$ decay. The data can be described either with a model including $f_0(500)$, $f_0(980)$ and $f_2(1270)$ resonances, in which the contributions of $f_0(980)$ and $f_2(1270)$ are unexpectedly large, or with a model including $f_0(500)$, a doubly charged open-charm tetraquark state $T_{c\bar{s}}^{++}$ and its isospin partner $T_{c\bar{s}}^{0}$. If the former is considered implausible, the $T_{c\bar{s}}$ states are observed with high significance, and the data are consistent with isospin symmetry. When imposing isospin constraints between the two $T_{c\bar{s}}$ states, their mass and width are determined to be $2327\pm13\pm13\,$MeV and $96\pm16\,^{+170}_{-23}\,$MeV, respectively, where the first uncertainty is statistical and the second is systematic. The mass is slightly below the $DK$ threshold, and a spin-parity of $0^+$ is favoured with high significance. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3280/ (LHCb public pages)</span> </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-033, CERN-EP-2024-264 </p>
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03399v1-abstract-full').style.display = 'none'; document.getElementById('2411.03399v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3280/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-033, CERN-EP-2024-264 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03260">arXiv:2411.03260</a> <span> [<a href="https://arxiv.org/pdf/2411.03260">pdf</a>, <a href="https://arxiv.org/format/2411.03260">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ShadowMamba: State-Space Model with Boundary-Region Selective Scan for Shadow Removal </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiujin Zhu</a>, <a href="/search/?searchtype=author&query=Chow%2C+C">Chee-Onn Chow</a>, <a href="/search/?searchtype=author&query=Chuah%2C+J+H">Joon Huang Chuah</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03260v1-abstract-short" style="display: inline;"> Image shadow removal is a typical low-level vision problem, where the presence of shadows leads to abrupt changes in brightness in certain regions, affecting the accuracy of upstream tasks. Current shadow removal methods still face challenges such as residual boundary artifacts, and capturing feature information at shadow boundaries is crucial for removing shadows and eliminating residual boundary… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03260v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03260v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03260v1-abstract-full" style="display: none;"> Image shadow removal is a typical low-level vision problem, where the presence of shadows leads to abrupt changes in brightness in certain regions, affecting the accuracy of upstream tasks. Current shadow removal methods still face challenges such as residual boundary artifacts, and capturing feature information at shadow boundaries is crucial for removing shadows and eliminating residual boundary artifacts. Recently, Mamba has achieved remarkable success in computer vision by globally modeling long-sequence information with linear complexity. 
However, when applied to image shadow removal, the original Mamba scanning method overlooks the semantic continuity of shadow boundaries as well as the continuity of semantics within the same region. Based on the unique characteristics of shadow images, this paper proposes a novel selective scanning method called boundary-region selective scanning. This method scans boundary regions, shadow regions, and non-shadow regions independently, bringing pixels of the same region type closer together in the long sequence and focusing in particular on local information at the boundaries, which is crucial for shadow removal. It is combined with global scanning and channel scanning to jointly accomplish shadow removal. We name our model ShadowMamba, the first Mamba-based model for shadow removal. Extensive experimental results show that our method outperforms current state-of-the-art models across most metrics on multiple datasets. The code for ShadowMamba is available at (Code will be released upon acceptance). </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
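<p class="is-size-7">The core reordering idea can be sketched in a few lines (our illustration only; the paper couples such a scan with global and channel scans inside a state-space model):</p>
<pre><code class="language-python">
# Boundary-region selective scan: group boundary, shadow, and non-shadow
# pixels contiguously in the 1-D sequence fed to a sequence model.
import numpy as np

def region_scan(feat: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """feat: (H, W, C) features; mask: (H, W) labels with
    2 = boundary, 1 = shadow, 0 = non-shadow. Returns (H*W, C)."""
    flat_feat = feat.reshape(-1, feat.shape[-1])
    flat_mask = mask.reshape(-1)
    order = np.concatenate([np.flatnonzero(flat_mask == r) for r in (2, 1, 0)])
    return flat_feat[order]  # boundary pixels first, then shadow, then rest
</code></pre>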
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03223">arXiv:2411.03223</a> <span> [<a href="https://arxiv.org/pdf/2411.03223">pdf</a>, <a href="https://arxiv.org/format/2411.03223">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div>
<p class="title is-5 mathjax"> Beyond Grid Data: Exploring Graph Neural Networks for Earth Observation </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhao%2C+S">Shan Zhao</a>, <a href="/search/?searchtype=author&query=Chen%2C+Z">Zhaiyu Chen</a>, <a href="/search/?searchtype=author&query=Xiong%2C+Z">Zhitong Xiong</a>, <a href="/search/?searchtype=author&query=Shi%2C+Y">Yilei Shi</a>, <a href="/search/?searchtype=author&query=Saha%2C+S">Sudipan Saha</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X+X">Xiao Xiang Zhu</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2411.03223v2-abstract-full"> Earth Observation (EO) data analysis has been significantly revolutionized by deep learning (DL), with applications typically limited to grid-like data structures. Graph Neural Networks (GNNs) emerge as an important innovation, propelling DL into the non-Euclidean domain. Naturally, GNNs can effectively tackle the challenges posed by diverse modalities, multiple sensors, and the heterogeneous nature of EO data. To introduce GNNs in the related domains, our review begins by offering fundamental knowledge on GNNs. Then, we summarize the generic problems in EO, to which GNNs can offer potential solutions. Following this, we explore a broad spectrum of GNNs' applications to scientific problems in Earth systems, covering areas such as weather and climate analysis, disaster management, air quality monitoring, agriculture, land cover classification, hydrological process modeling, and urban modeling. The rationale behind adopting GNNs in these fields is explained, alongside methodologies for organizing graphs and designing favorable architectures for various tasks. Furthermore, we highlight methodological challenges of implementing GNNs in these domains and possible solutions that could guide future research. While acknowledging that GNNs are not a universal solution, we conclude the paper by comparing them with other popular architectures like transformers and analyzing their potential synergies. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in Geoscience and Remote Sensing Magazine (GRSM)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03019">arXiv:2411.03019</a> <span> [<a href="https://arxiv.org/pdf/2411.03019">pdf</a>, <a href="https://arxiv.org/format/2411.03019">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> FEDLAD: Federated Evaluation of Deep Leakage Attacks and Defenses </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Baglin%2C+I">Isaac Baglin</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiatian Zhu</a>, <a href="/search/?searchtype=author&query=Hadfield%2C+S">Simon Hadfield</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03019v1-abstract-short" style="display: inline;"> Federated Learning is a privacy preserving decentralized machine learning paradigm designed to collaboratively train models across multiple clients by exchanging gradients to the server and keeping private data local. Nevertheless, recent research has revealed that the security of Federated Learning is compromised, as private ground truth data can be recovered through a gradient inversion techniqu… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03019v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03019v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03019v1-abstract-full" style="display: none;"> Federated Learning is a privacy preserving decentralized machine learning paradigm designed to collaboratively train models across multiple clients by exchanging gradients to the server and keeping private data local. Nevertheless, recent research has revealed that the security of Federated Learning is compromised, as private ground truth data can be recovered through a gradient inversion technique known as Deep Leakage. While these attacks are crafted with a focus on applications in Federated Learning, they generally are not evaluated in realistic scenarios. This paper introduces the FEDLAD Framework (Federated Evaluation of Deep Leakage Attacks and Defenses), a comprehensive benchmark for evaluating Deep Leakage attacks and defenses within a realistic Federated context. By implementing a unified benchmark that encompasses multiple state-of-the-art Deep Leakage techniques and various defense strategies, our framework facilitates the evaluation and comparison of the efficacy of these methods across different datasets and training states. This work highlights a crucial trade-off between privacy and model accuracy in Federated Learning and aims to advance the understanding of security challenges in decentralized machine learning systems, stimulate future research, and enhance reproducibility in evaluating Deep Leakage attacks and defenses. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03019v1-abstract-full').style.display = 'none'; document.getElementById('2411.03019v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.11; I.4.5 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02979">arXiv:2411.02979</a> <span> [<a href="https://arxiv.org/pdf/2411.02979">pdf</a>, <a href="https://arxiv.org/format/2411.02979">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/s11704-024-40417-7">10.1007/s11704-024-40417-7 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> CAD-NeRF: Learning NeRFs from Uncalibrated Few-view Images by CAD Model Retrieval </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Wen%2C+X">Xin Wen</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xuening Zhu</a>, <a href="/search/?searchtype=author&query=Yi%2C+R">Renjiao Yi</a>, <a href="/search/?searchtype=author&query=Wang%2C+Z">Zhifeng Wang</a>, <a href="/search/?searchtype=author&query=Zhu%2C+C">Chenyang Zhu</a>, <a href="/search/?searchtype=author&query=Xu%2C+K">Kai Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02979v1-abstract-short" style="display: inline;"> Reconstructing from multi-view images is a longstanding problem in 3D vision, where neural radiance fields (NeRFs) have shown great potential and get realistic rendered images of novel views. Currently, most NeRF methods either require accurate camera poses or a large number of input images, or even both. Reconstructing NeRF from few-view images without poses is challenging and highly ill-posed. T… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02979v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02979v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02979v1-abstract-full" style="display: none;"> Reconstructing from multi-view images is a longstanding problem in 3D vision, where neural radiance fields (NeRFs) have shown great potential and get realistic rendered images of novel views. Currently, most NeRF methods either require accurate camera poses or a large number of input images, or even both. Reconstructing NeRF from few-view images without poses is challenging and highly ill-posed. 
To address this problem, we propose CAD-NeRF, a method that reconstructs a NeRF from fewer than 10 images without any known poses. Specifically, we build a mini library of several CAD models from ShapeNet and render them from many random views. Given the sparse-view input images, we run model and pose retrieval against the library to obtain a model with a similar shape, which serves as the density supervision and pose initialization. Here we propose a multi-view pose retrieval method to avoid pose conflicts among views, a new and previously unaddressed problem in uncalibrated NeRF methods. Then, the geometry of the object is trained under the CAD guidance, and the deformation of the density field and the camera poses are optimized jointly. Texture and density are then trained and fine-tuned as well. All training phases are self-supervised. Comprehensive evaluations on synthetic and real images show that CAD-NeRF successfully learns accurate densities with a large deformation from the retrieved CAD models, demonstrating its generalization ability. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The article has been accepted by Frontiers of Computer Science (FCS)</span> </p>
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02734">arXiv:2411.02734</a> <span> [<a href="https://arxiv.org/pdf/2411.02734">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div>
<p class="title is-5 mathjax"> Integrated lithium niobate photonic computing circuit based on efficient and high-speed electro-optic conversion </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Hu%2C+Y">Yaowen Hu</a>, <a href="/search/?searchtype=author&query=Song%2C+Y">Yunxiang Song</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xinrui Zhu</a>, <a href="/search/?searchtype=author&query=Guo%2C+X">Xiangwen Guo</a>, <a href="/search/?searchtype=author&query=Lu%2C+S">Shengyuan Lu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+Q">Qihang Zhang</a>, <a href="/search/?searchtype=author&query=He%2C+L">Lingyan He</a>, <a href="/search/?searchtype=author&query=Franken%2C+C+A+A">C. A. A. Franken</a>,
arXiv:2411.02734 (https://arxiv.org/abs/2411.02734) [pdf] physics.optics, physics.app-ph
Title: Integrated lithium niobate photonic computing circuit based on efficient and high-speed electro-optic conversion
Authors: Yaowen Hu, Yunxiang Song, Xinrui Zhu, Xiangwen Guo, Shengyuan Lu, Qihang Zhang, Lingyan He, C. A. A. Franken, Keith Powell, Hana Warner, Daniel Assumpcao, Dylan Renaud, Ying Wang, Letícia Magalhães, Victoria Rosborough, Amirhassan Shams-Ansari, Xudong Li, Rebecca Cheng, Kevin Luke, Kiyoul Yang, George Barbastathis, Mian Zhang, Di Zhu, Leif Johansson, Andreas Beling, et al. (2 additional authors not shown)
Abstract: Here we show a photonic computing accelerator utilizing a system-level thin-film lithium niobate circuit which overcomes this limitation. Leveraging the strong electro-optic (Pockels) effect and the scalability of this platform, we demonstrate photonic computation at speeds up to 1.36 TOPS while consuming 0.057 pJ/OP. Our system features more than 100 thin-film lithium niobate high-performance components working synergistically, surpassing state-of-the-art systems on this platform. We further demonstrate binary classification, handwritten-digit classification, and image classification with remarkable accuracy, showcasing our system's capability of executing real algorithms. Finally, we investigate the opportunities offered by combining our system with a hybrid-integrated distributed feedback laser source and a heterogeneously integrated modified uni-traveling-carrier photodiode. Our results illustrate the promise of thin-film lithium niobate as a computational platform, addressing current bottlenecks in both electronic and photonic computation.
Its unique properties of high-performance electro-optic weight encoding and conversion, wafer-scale scalability, and compatibility with integrated lasers and detectors position thin-film lithium niobate photonics as a valuable complement to silicon photonics, with extensions to applications in ultrafast and power-efficient signal processing and ranging.
Submitted: 4 November, 2024; originally announced November 2024.
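The two headline figures fix the compute power budget, since power is simply throughput multiplied by energy per operation; a one-line sanity check:

```python
throughput = 1.36e12        # 1.36 TOPS, in operations per second
energy_per_op = 0.057e-12   # 0.057 pJ/OP, in joules

power_w = throughput * energy_per_op
print(f"{power_w * 1e3:.1f} mW")  # ~77.5 mW of compute power
```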
arXiv:2411.02279 (https://arxiv.org/abs/2411.02279) [pdf, other] cs.LG
Title: ELU-GCN: Effectively Label-Utilizing Graph Convolutional Network
Authors: Jincheng Huang, Yujie Mo, Xiaoshuang Shi, Lei Feng, Xiaofeng Zhu
Abstract: The message-passing mechanism of graph convolutional networks (GCNs) enables label information to be propagated to a broader range of neighbors, thereby increasing the utilization of labels. However, label information is not always effectively utilized in the traditional GCN framework. To address this issue, we propose a new two-step framework called ELU-GCN. In the first stage, ELU-GCN conducts graph learning to learn a new graph structure (i.e., the ELU-graph), which enables GCNs to effectively utilize label information. In the second stage, we design a new graph contrastive learning objective on the GCN framework for representation learning by exploring the consistency and mutually exclusive information between the learned ELU-graph and the original graph. Moreover, we theoretically demonstrate that the proposed method can ensure the generalization ability of GCNs. Extensive experiments validate the superiority of the proposed method.
Submitted: 4 November, 2024; originally announced November 2024.
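As background for the message passing the abstract refers to, here is one standard GCN propagation step (symmetric Kipf-Welling normalization) in plain numpy; ELU-GCN's actual contribution, learning which graph to propagate over, is not reproduced here.

```python
import numpy as np

def gcn_layer(adj, features, weight):
    """One GCN step: H' = ReLU(D^{-1/2} (A + I) D^{-1/2} H W)."""
    a_hat = adj + np.eye(adj.shape[0])           # add self-loops
    d_inv_sqrt = np.diag(a_hat.sum(axis=1) ** -0.5)
    norm_adj = d_inv_sqrt @ a_hat @ d_inv_sqrt   # symmetric normalization
    return np.maximum(norm_adj @ features @ weight, 0.0)

# Toy 4-node path graph, 3-dim input features, 2-dim output.
adj = np.array([[0, 1, 0, 0],
                [1, 0, 1, 0],
                [0, 1, 0, 1],
                [0, 0, 1, 0]], dtype=float)
rng = np.random.default_rng(0)
print(gcn_layer(adj, rng.random((4, 3)), rng.random((3, 2))).shape)  # (4, 2)
```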
arXiv:2411.02236 (https://arxiv.org/abs/2411.02236) [pdf, other] cs.CV, cs.MM, cs.SD, eess.AS
Title: 3D Audio-Visual Segmentation
Authors: Artem Sokolov, Swapnil Bhosale, Xiatian Zhu
Abstract: Recognizing the sounding objects in scenes is a longstanding objective in embodied AI, with diverse applications in robotics and AR/VR/MR. To that end, Audio-Visual Segmentation (AVS), which takes an audio signal as a condition to identify the masks of the target sounding objects in an input image captured with synchronous camera and microphone sensors, has recently advanced. However, this paradigm is still insufficient for real-world operation, as the mapping from 2D images to 3D scenes is missing. To address this fundamental limitation, we introduce a novel research problem, 3D Audio-Visual Segmentation, extending the existing AVS to the 3D output space. This problem poses more challenges due to variations in camera extrinsics, audio scattering, occlusions, and diverse acoustics across sounding object categories. To facilitate this research, we create the very first simulation-based benchmark, 3DAVS-S34-O7, providing photorealistic 3D scene environments with grounded spatial audio under single-instance and multi-instance settings, across 34 scenes and 7 object categories. This is made possible by re-purposing the Habitat simulator to generate comprehensive annotations of sounding object locations and corresponding 3D masks. Subsequently, we propose a new approach, EchoSegnet, characterized by synergistically integrating ready-to-use knowledge from pretrained 2D audio-visual foundation models with a 3D visual scene representation through spatial audio-aware mask alignment and refinement. Extensive experiments demonstrate that EchoSegnet can effectively segment sounding objects in 3D space on our new benchmark, representing a significant advancement in the field of embodied AI. Project page: https://surrey-uplab.github.io/research/3d-audio-visual-segmentation/
Submitted: 4 November, 2024; originally announced November 2024.
Comments: Accepted at the NeurIPS 2024 Workshop on Audio Imagination
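EchoSegnet's internals are not spelled out in the abstract, but the core geometric step any 2D-to-3D extension of AVS needs, lifting a predicted 2D mask into 3D given depth and camera intrinsics, is a generic building block worth sketching (pinhole unprojection; not the paper's specific method):

```python
import numpy as np

def lift_mask_to_3d(mask, depth, fx, fy, cx, cy):
    """Back-project masked pixels into 3D camera-frame points (pinhole model)."""
    v, u = np.nonzero(mask)   # row/column indices of mask pixels
    z = depth[v, u]
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.stack([x, y, z], axis=1)  # (N, 3) point cloud

mask = np.zeros((4, 4), dtype=bool)
mask[1:3, 1:3] = True                  # toy 2x2 sounding-object mask
depth = np.full((4, 4), 2.0)           # flat 2 m depth map
print(lift_mask_to_3d(mask, depth, fx=2.0, fy=2.0, cx=2.0, cy=2.0))
```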
arXiv:2411.01155 (https://arxiv.org/abs/2411.01155) [pdf, other] cs.LG
Title: HG-Adapter: Improving Pre-Trained Heterogeneous Graph Neural Networks with Dual Adapters
Authors: Yujie Mo, Runpeng Yu, Xiaofeng Zhu, Xinchao Wang
Abstract: The "pre-train, prompt-tuning" paradigm has demonstrated impressive performance for tuning pre-trained heterogeneous graph neural networks (HGNNs) by mitigating the gap between pre-trained models and downstream tasks. However, most prompt-tuning-based works may face at least two limitations: (i) the model may be insufficient to fit the graph structures well, as they are generally ignored in the prompt-tuning stage, increasing the training error and decreasing the generalization ability; and (ii) the model may suffer from limited labeled data during the prompt-tuning stage, leading to a large generalization gap between the training error and the test error that further affects model generalization. To alleviate the above limitations, we first derive the generalization error bound for existing prompt-tuning-based methods, and then propose a unified framework that combines two new adapters with potential labeled data extension to improve the generalization of pre-trained HGNN models. Specifically, we design dual structure-aware adapters to adaptively fit task-related homogeneous and heterogeneous structural information. We further design a label-propagated contrastive loss and two self-supervised losses to optimize the dual adapters and incorporate unlabeled nodes as potential labeled data. Theoretical analysis indicates that the proposed method achieves a lower generalization error bound than existing methods, thus obtaining superior generalization ability. Comprehensive experiments demonstrate the effectiveness and generalization of the proposed method on different downstream tasks.
Submitted: 2 November, 2024; originally announced November 2024.
Comments: 23 pages
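For reference, an "adapter" in this sense is a small bottleneck network attached to a frozen backbone, with only the adapter trained. A generic sketch follows; the paper's dual structure-aware adapters and their contrastive and self-supervised losses are not reproduced.

```python
import numpy as np

class Adapter:
    """Bottleneck adapter: down-project, ReLU, up-project, residual add."""
    def __init__(self, dim, bottleneck, seed=0):
        rng = np.random.default_rng(seed)
        self.w_down = rng.normal(0.0, 0.02, (dim, bottleneck))
        self.w_up = rng.normal(0.0, 0.02, (bottleneck, dim))

    def __call__(self, h):
        # h: frozen-backbone embeddings, shape (n_nodes, dim)
        return h + np.maximum(h @ self.w_down, 0.0) @ self.w_up

h = np.ones((5, 16))            # stand-in for pre-trained HGNN outputs
print(Adapter(16, 4)(h).shape)  # (5, 16); only adapter weights would train
```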
arXiv:2411.01102 (https://arxiv.org/abs/2411.01102) [pdf, other] cs.SE, cs.CR
Title: BinEnhance: An Enhancement Framework Based on External Environment Semantics for Binary Code Search
Authors: Yongpan Wang, Hong Li, Xiaojie Zhu, Siyuan Li, Chaopeng Dong, Shouguo Yang, Kangyuan Qin
Abstract: Binary code search plays a crucial role in applications like software reuse detection. Currently, existing models are typically based on either internal code semantics or a combination of function call graphs (CGs) and internal code semantics. However, these models have limitations. Internal code semantic models only consider the semantics within the function, ignoring inter-function semantics, which makes it difficult to handle situations such as function inlining. The combination of CGs and internal code semantics is insufficient for addressing complex real-world scenarios. To address these limitations, we propose BinEnhance, a novel framework designed to leverage inter-function semantics to enhance the expression of internal code semantics for binary code search. Specifically, BinEnhance constructs an External Environment Semantic Graph (EESG), which establishes a stable and analogous external environment for homologous functions by using different inter-function semantic relations (e.g., call, location, data-co-use).
After the construction of the EESG, we utilize the embeddings generated by existing internal code semantic models to initialize its nodes. Finally, we design a Semantic Enhancement Model (SEM) that uses Relational Graph Convolutional Networks (RGCNs) and a residual block to learn valuable external semantics on the EESG and generate the enhanced semantic embedding. In addition, BinEnhance utilizes data feature similarity to refine the cosine similarity of the semantic embeddings. We conduct experiments under six different tasks (e.g., the function inlining scenario), and the results illustrate the performance and robustness of BinEnhance. Applying BinEnhance to HermesSim, Asm2vec, TREX, Gemini, and Asteria on two public datasets improves Mean Average Precision (MAP) from 53.6% to 69.7%. Moreover, the efficiency increases fourfold.
Submitted: 1 November, 2024; originally announced November 2024.
Comments: Accepted by Network and Distributed System Security (NDSS) Symposium 2025 fall cycle
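The final refinement step reads as a fusion of two similarity signals. The abstract does not state the fusion rule, so the weighted blend below, including the `alpha` weight, is an assumption made purely for illustration.

```python
import numpy as np

def refined_similarity(emb_a, emb_b, feat_sim, alpha=0.8):
    """Blend embedding cosine similarity with a data-feature similarity score.

    feat_sim is assumed to lie in [0, 1]; alpha and the linear blend are
    illustrative assumptions, not BinEnhance's documented rule.
    """
    cos = emb_a @ emb_b / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b))
    return alpha * cos + (1.0 - alpha) * feat_sim

a = np.array([1.0, 2.0, 3.0])   # enhanced semantic embeddings (stand-ins)
b = np.array([1.0, 2.0, 2.5])
print(refined_similarity(a, b, feat_sim=0.9))
```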
arXiv:2411.00373 (https://arxiv.org/abs/2411.00373) [pdf, other] cs.IT, eess.SP
Title: Discrete RIS Enhanced Space Shift Keying MIMO System via Reflecting Beamforming Optimization
Authors: Xusheng Zhu, Qingqing Wu, Wen Chen, Xinyuan He, Lexi Xu, Yaxin Zhang
Abstract: In this paper, a discrete reconfigurable intelligent surface (RIS)-assisted spatial shift keying (SSK) multiple-input multiple-output (MIMO) scheme is investigated, in which a direct link between the transmitter and the receiver is considered. To improve the reliability of the RIS-SSK-MIMO scheme, we formulate an objective function based on minimizing the average bit error probability (ABEP). Since the reflecting phase shifts of the RIS are discrete, it is difficult to address this problem directly. To this end, we optimize the RIS phase shifts to maximize the minimum Euclidean distance between constellation points by applying successive convex approximation (SCA) and a penalty-alternating optimization method. Simulation results verify the superiority of the proposed RIS-SSK-MIMO scheme and demonstrate the impact of the number of RIS elements, the number of phase quantization bits, and the numbers of receive and transmit antennas on reliability.
Submitted: 1 November, 2024; originally announced November 2024.
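The discrete optimization at the heart of the scheme can be made concrete at toy scale: with b quantization bits each RIS element takes one of 2^b phases, and the goal is the phase vector maximizing the minimum pairwise distance between received SSK constellation points. The paper uses SCA with penalty-alternating optimization; for a handful of elements, brute force over random stand-in channels shows the objective:

```python
import itertools
import numpy as np

rng = np.random.default_rng(1)
n_tx, n_ris, bits = 4, 3, 1   # toy sizes; the paper targets far larger arrays
h = rng.normal(size=(n_tx, n_ris)) + 1j * rng.normal(size=(n_tx, n_ris))
g = rng.normal(size=n_ris) + 1j * rng.normal(size=n_ris)   # RIS-to-receiver
d = rng.normal(size=n_tx) + 1j * rng.normal(size=n_tx)     # direct link

phases = 2.0 * np.pi * np.arange(2 ** bits) / 2 ** bits
best_dist, best_combo = -1.0, None
for combo in itertools.product(phases, repeat=n_ris):
    theta = np.exp(1j * np.asarray(combo))
    y = d + h @ (theta * g)   # received point when transmit antenna i is active
    dmin = min(abs(y[i] - y[j])
               for i, j in itertools.combinations(range(n_tx), 2))
    if dmin > best_dist:
        best_dist, best_combo = dmin, combo
print(best_dist, best_combo)
```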
arXiv:2410.23815 (https://arxiv.org/abs/2410.23815) [pdf, other] cs.SD, cs.AI, eess.AS
Title: The NPU-HWC System for the ISCSLP 2024 Inspirational and Convincing Audio Generation Challenge
Authors: Dake Guo, Jixun Yao, Xinfa Zhu, Kangxiang Xia, Zhao Guo, Ziyu Zhang, Yao Wang, Jie Liu, Lei Xie
Abstract: This paper presents the NPU-HWC system submitted to the ISCSLP 2024 Inspirational and Convincing Audio Generation Challenge (ICAGC). Our system consists of two modules: a speech generator for Track 1 and a background audio generator for Track 2. In Track 1, we employ Single-Codec to tokenize the speech into discrete tokens and use a language-model-based approach to achieve zero-shot speaking style cloning. Single-Codec effectively decouples timbre and speaking style at the token level, reducing the acoustic modeling burden on the autoregressive language model. Additionally, we use DSPGAN to upsample 16 kHz mel-spectrograms to high-fidelity 48 kHz waveforms. In Track 2, we propose a background audio generator based on large language models (LLMs). This system produces scene-appropriate accompaniment descriptions, synthesizes background audio with Tango 2, and integrates it with the speech generated by our Track 1 system. Our submission achieves second place in Track 1 and first place in Track 2.
Submitted: 31 October, 2024; originally announced October 2024.
Comments: accepted by ISCSLP 2024
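The Track 2 integration step ultimately amounts to scaling the generated background so it sits a chosen number of dB below the speech before overlaying. A minimal sketch with synthetic signals; the 15 dB target and the power-based scaling are assumptions, not the system's documented settings.

```python
import numpy as np

def mix_speech_background(speech, background, snr_db=15.0):
    """Scale background so speech power exceeds it by snr_db, then overlay."""
    p_speech = np.mean(speech ** 2)
    p_bg = np.mean(background ** 2)
    scale = np.sqrt(p_speech / (p_bg * 10.0 ** (snr_db / 10.0)))
    return speech + scale * background[: len(speech)]

sr = 48_000   # Track 1 outputs 48 kHz waveforms
t = np.linspace(0.0, 1.0, sr, endpoint=False)
speech = 0.5 * np.sin(2 * np.pi * 220.0 * t)    # stand-ins for real audio
background = 0.3 * np.sin(2 * np.pi * 2.0 * t)
print(mix_speech_background(speech, background).shape)  # (48000,)
```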
arXiv:2410.23637 (https://arxiv.org/abs/2410.23637) [pdf, other] cs.LG, cs.AI, cs.DS, cs.GT
Title: Anytime-Constrained Multi-Agent Reinforcement Learning
Authors: Jeremy McMahan, Xiaojin Zhu
Abstract: We introduce anytime constraints to the multi-agent setting, with the corresponding solution concept being the anytime-constrained equilibrium (ACE). Then, we present a comprehensive theory of anytime-constrained Markov games, which includes (1) a computational characterization of feasible policies, (2) a fixed-parameter tractable algorithm for computing ACE, and (3) a polynomial-time algorithm for approximately computing feasible ACE. Since computing a feasible policy is NP-hard even for two-player zero-sum games, our approximation guarantees are the best possible under worst-case analysis. We also develop the first theory of efficient computation for action-constrained Markov games, which may be of independent interest.
Submitted: 31 October, 2024; originally announced October 2024.
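The distinction between an anytime constraint and the usual expected-total-cost constraint is easy to state in code: the running cumulative cost must stay within budget after every step, not only at the end of the episode.

```python
def anytime_feasible(costs, budget):
    """True iff every prefix of the cost sequence stays within budget."""
    total = 0
    for c in costs:
        total += c
        if total > budget:
            return False
    return True

def total_feasible(costs, budget):
    """Weaker, end-of-episode check: only the final total matters."""
    return sum(costs) <= budget

traj = [4, 5, -3, 2]                      # per-step costs (integer toy values)
print(anytime_feasible(traj, budget=8))   # False: prefix 4 + 5 = 9 > 8
print(total_feasible(traj, budget=8))     # True: total 8 <= 8
```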
arXiv:2410.22634 (https://arxiv.org/abs/2410.22634) [pdf, other] nlin.SI, math-ph
Title: High-genus KdV soliton gases and their long-time asymptotics
Authors: Deng-Shan Wang, Dinghao Zhu, Xiaodong Zhu
Abstract: This paper employs the Riemann-Hilbert problem to provide a comprehensive analysis of the asymptotic behavior of high-genus Korteweg-de Vries soliton gases. It is demonstrated that the two-genus soliton gas is related to the two-phase Riemann-Theta function as \(x \to +\infty\) and approaches zero as \(x \to -\infty\). Additionally, the long-time asymptotic behavior of this two-genus soliton gas can be categorized into five distinct regions in the \(x\)-\(t\) plane, which from left to right are rapid decay, modulated one-phase wave, unmodulated one-phase wave, modulated two-phase wave, and unmodulated two-phase wave. Moreover, an innovative method is introduced to solve the model problem associated with the high-genus Riemann surface, leading to the determination of the leading terms, which are also related to the multi-phase Riemann-Theta function. A general discussion of the case of an arbitrary \(N\)-genus soliton gas is also presented.
Submitted: 29 October, 2024; originally announced October 2024.
arXiv:2410.22629 (https://arxiv.org/abs/2410.22629) [pdf, other] cs.CV
Title: CrossEarth: Geospatial Vision Foundation Model for Domain Generalizable Remote Sensing Semantic Segmentation
Authors: Ziyang Gong, Zhixiang Wei, Di Wang, Xianzheng Ma, Hongruixuan Chen, Yuru Jia, Yupeng Deng, Zhenming Ji, Xiangwei Zhu, Naoto Yokoya, Jing Zhang, Bo Du, Liangpei Zhang
Abstract: The field of Remote Sensing Domain Generalization (RSDG) has emerged as a critical and valuable research frontier, focusing on developing models that generalize effectively across diverse scenarios. Despite the substantial domain gaps in RS images, characterized by variability in location, wavelength, and sensor type, research in this area remains underexplored: (1) current cross-domain methods primarily focus on Domain Adaptation (DA), which adapts models to predefined domains rather than to unseen ones; (2) few studies target the RSDG issue, especially for semantic segmentation tasks, where existing models are developed for specific unknown domains and struggle with underfitting on other unknown scenarios; (3) existing RS foundation models tend to prioritize in-domain performance over cross-domain generalization. To this end, we introduce the first vision foundation model for RSDG semantic segmentation, CrossEarth.
CrossEarth demonstrates strong cross-domain generalization through a specially designed data-level Earth-Style Injection pipeline and a model-level Multi-Task Training pipeline. In addition, for the semantic segmentation task, we have curated an RSDG benchmark comprising 28 cross-domain settings across various regions, spectral bands, platforms, and climates, providing a comprehensive framework for testing the generalizability of future RSDG models. Extensive experiments on this benchmark demonstrate the superiority of CrossEarth over existing state-of-the-art methods.
Submitted: 31 October, 2024; v1 submitted 29 October, 2024; originally announced October 2024.
Comments: The codes and models will be available at https://github.com/Cuzyoung/CrossEarth

arXiv:2410.21921 (https://arxiv.org/abs/2410.21921) [pdf, other] cond-mat.mes-hall
Title: Dispersions and magnetism of strain-induced pseudo Landau levels in Bernal-stacked bilayer graphene
Authors: Tianyu Liu, Jun-Hong Li, Xingchuan Zhu, Huaiming Guo, Hai-Zhou Lu, X. C. Xie
Abstract: Elastic strain can displace the massless Dirac fermions in monolayer graphene in a space-dependent fashion, similar to the effect of an external magnetic field, thus giving rise to Landau quantization. We here show that strain-induced Landau quantization can also take place in Bernal-stacked bilayer graphene, where the low-energy excitations are massive rather than Dirac-like.
The zigzag ribbon of Bernal-stacked bilayer graphene realizes a two-legged Su-Schrieffer-Heeger model with a domain wall, which coincides with the guiding center of the strain-induced pseudo Landau levels. We reduce the lattice model of the ribbon in the vicinity of the guiding center to an exactly solvable coupled Dirac model and analytically derive the dispersions of the strain-induced pseudo Landau levels. Remarkably, the zeroth and first pseudo Landau levels are dispersionless and sublattice-polarized. We elucidate that the interaction on these two pseudo Landau levels results in a global antiferromagnetic order. Our study extends strain-induced Landau quantization to massive excitations and indicates strain as a tuning knob for magnetism.
Submitted: 29 October, 2024; originally announced October 2024.
Comments: 11 pages main text + 4.5 pages appendices, 9 figures
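The Su-Schrieffer-Heeger analogy can be illustrated with the simplest single-chain version: alternating hoppings whose dimerization pattern flips at a domain wall bind a mid-gap state there, analogous to the dispersionless pseudo Landau levels pinned at the guiding center. A toy diagonalization (parameters are illustrative, not the ribbon model of the paper):

```python
import numpy as np

def ssh_with_domain_wall(n_cells=20, t1=1.0, t2=0.4):
    """Tight-binding SSH chain whose strong/weak bond pattern flips midway."""
    n = 2 * n_cells
    ham = np.zeros((n, n))
    for i in range(n - 1):
        left_half = i < n // 2   # dimerization flips at the domain wall
        t = (t1 if i % 2 == 0 else t2) if left_half else (t2 if i % 2 == 0 else t1)
        ham[i, i + 1] = ham[i + 1, i] = -t
    return np.linalg.eigvalsh(ham)

energies = ssh_with_domain_wall()
# Two near-zero modes appear inside the bulk gap: one bound to the domain
# wall and one at the weakly terminated chain end.
print(sorted(abs(energies))[:3])
```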
arXiv:2410.21841 (https://arxiv.org/abs/2410.21841) [pdf, ps, other] hep-ex
Title: Search for $\Lambda$-$\bar\Lambda$ oscillation in $J/\psi\rightarrow\Lambda\bar\Lambda$ decay
Authors: BESIII Collaboration: M. Ablikim, M. N. Achasov, P. Adlarson, O. Afedulidis, X. C. Ai, R. Aliberti, A. Amoroso, Q. An, Y. Bai, O. Bakina, I. Balossino, Y. Ban, H.-R. Bao, V. Batozskaya, K. Begzsuren, N. Berger, M. Berlowski, M. Bertani, D. Bettoni, F. Bianchi, E. Bianco, A. Bortone, I. Boyko, R. A. Briere, et al. (638 additional authors not shown)
Abstract: Using $(10087\pm44)\times 10^{6}$ $J/\psi$ decays collected by the BESIII detector at the BEPCII collider, we search for baryon number violation via $\Lambda$-$\bar\Lambda$ oscillation in the decay $J/\psi\to\Lambda\bar\Lambda$. No evidence for $\Lambda$-$\bar\Lambda$ oscillation is observed. The upper limit on the time-integrated probability of $\Lambda$-$\bar\Lambda$ oscillation is estimated to be $1.4\times 10^{-6}$, corresponding to an oscillation parameter of less than $2.1\times 10^{-18}~\mathrm{GeV}$ at the $90\%$ confidence level.
Submitted: 29 October, 2024; v1 submitted 29 October, 2024; originally announced October 2024.
Comments: 8 pages, 2 figures
href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>