<!-- Saved from cinxe.com: "Search | arXiv e-print repository" (stray pre-doctype text wrapped in a comment so the document stays valid and renders in standards mode) -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 2,296 results for author: <span class="mathjax">Huang, W</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/" aria-role="search"> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Huang, W"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label 
class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Huang%2C+W&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Huang, W"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option 
value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=150" class="pagination-link " aria-label="Page 4">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=200" class="pagination-link " aria-label="Page 5">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li 
class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.17061">arXiv:2411.17061</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.17061">pdf</a>, <a href="https://arxiv.org/format/2411.17061">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SCASeg: Strip Cross-Attention for Efficient Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xu%2C+G">Guoan Xu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+J">Jiaming Chen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenfeng Huang</a>, <a href="/search/?searchtype=author&amp;query=Jia%2C+W">Wenjing Jia</a>, <a href="/search/?searchtype=author&amp;query=Gao%2C+G">Guangwei Gao</a>, <a href="/search/?searchtype=author&amp;query=Qi%2C+G">Guo-Jun Qi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.17061v1-abstract-short" style="display: inline;"> The Vision Transformer (ViT) has achieved notable success in computer vision, with its variants extensively validated across various downstream tasks, including semantic segmentation. However, designed as general-purpose visual encoders, ViT backbones often overlook the specific needs of task decoders, revealing opportunities to design decoders tailored to efficient semantic segmentation. 
This pap&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17061v1-abstract-full').style.display = 'inline'; document.getElementById('2411.17061v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.17061v1-abstract-full" style="display: none;"> The Vision Transformer (ViT) has achieved notable success in computer vision, with its variants extensively validated across various downstream tasks, including semantic segmentation. However, designed as general-purpose visual encoders, ViT backbones often overlook the specific needs of task decoders, revealing opportunities to design decoders tailored to efficient semantic segmentation. This paper proposes Strip Cross-Attention (SCASeg), an innovative decoder head explicitly designed for semantic segmentation. Instead of relying on the simple conventional skip connections, we employ lateral connections between the encoder and decoder stages, using encoder features as Queries for the cross-attention modules. Additionally, we introduce a Cross-Layer Block that blends hierarchical feature maps from different encoder and decoder stages to create a unified representation for Keys and Values. To further boost computational efficiency, SCASeg compresses queries and keys into strip-like patterns to optimize memory usage and inference speed over the traditional vanilla cross-attention. Moreover, the Cross-Layer Block incorporates the local perceptual strengths of convolution, enabling SCASeg to capture both global and local context dependencies across multiple layers. This approach facilitates effective feature interaction at different scales, improving the overall performance. 
Experiments show that the adaptable decoder of SCASeg produces competitive performance across different setups, surpassing leading segmentation architectures on all benchmark datasets, including ADE20K, Cityscapes, COCO-Stuff 164k, and Pascal VOC2012, even under varying computational limitations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17061v1-abstract-full').style.display = 'none'; document.getElementById('2411.17061v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15729">arXiv:2411.15729</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15729">pdf</a>, <a href="https://arxiv.org/format/2411.15729">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> OccludeNet: A Causal Journey into Mixed-View Actor-Centric Video Action Recognition under Occlusions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+G">Guanyu Zhou</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+W">Wenxuan Liu</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenxin Huang</a>, <a href="/search/?searchtype=author&amp;query=Jia%2C+X">Xuemei Jia</a>, <a href="/search/?searchtype=author&amp;query=Zhong%2C+X">Xian Zhong</a>, <a 
href="/search/?searchtype=author&amp;query=Lin%2C+C">Chia-Wen Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15729v1-abstract-short" style="display: inline;"> The lack of occlusion data in commonly used action recognition video datasets limits model robustness and impedes sustained performance improvements. We construct OccludeNet, a large-scale occluded video dataset that includes both real-world and synthetic occlusion scene videos under various natural environments. OccludeNet features dynamic tracking occlusion, static scene occlusion, and multi-vie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15729v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15729v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15729v1-abstract-full" style="display: none;"> The lack of occlusion data in commonly used action recognition video datasets limits model robustness and impedes sustained performance improvements. We construct OccludeNet, a large-scale occluded video dataset that includes both real-world and synthetic occlusion scene videos under various natural environments. OccludeNet features dynamic tracking occlusion, static scene occlusion, and multi-view interactive occlusion, addressing existing gaps in data. Our analysis reveals that occlusion impacts action classes differently, with actions involving low scene relevance and partial body visibility experiencing greater accuracy degradation. To overcome the limitations of current occlusion-focused approaches, we propose a structural causal model for occluded scenes and introduce the Causal Action Recognition (CAR) framework, which employs backdoor adjustment and counterfactual reasoning. 
This framework enhances key actor information, improving model robustness to occlusion. We anticipate that the challenges posed by OccludeNet will stimulate further exploration of causal relations in occlusion scenarios and encourage a reevaluation of class correlations, ultimately promoting sustainable performance improvements. The code and full dataset will be released soon. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15729v1-abstract-full').style.display = 'none'; document.getElementById('2411.15729v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15571">arXiv:2411.15571</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15571">pdf</a>, <a href="https://arxiv.org/format/2411.15571">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> </div> </div> <p class="title is-5 mathjax"> Dephasing-assisted diffusive dynamics in superconducting quantum circuits </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liang%2C+Y">Yongqi Liang</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+C">Changrong Xie</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+Z">Zechen Guo</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+P">Peisheng Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenhui Huang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yiting Liu</a>, <a href="/search/?searchtype=author&amp;query=Qiu%2C+J">Jiawei Qiu</a>, 
<a href="/search/?searchtype=author&amp;query=Sun%2C+X">Xuandong Sun</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Z">Zilin Wang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+X">Xiaohan Yang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jiawei Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jiajian Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Libo Zhang</a>, <a href="/search/?searchtype=author&amp;query=Chu%2C+J">Ji Chu</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+W">Weijie Guo</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+J">Ji Jiang</a>, <a href="/search/?searchtype=author&amp;query=Linpeng%2C+X">Xiayu Linpeng</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+S">Song Liu</a>, <a href="/search/?searchtype=author&amp;query=Niu%2C+J">Jingjing Niu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuxuan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Ren%2C+W">Wenhui Ren</a>, <a href="/search/?searchtype=author&amp;query=Tao%2C+Z">Ziyu Tao</a>, <a href="/search/?searchtype=author&amp;query=Zhong%2C+Y">Youpeng Zhong</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+D">Dapeng Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15571v1-abstract-short" style="display: inline;"> Random fluctuations caused by environmental noise can lead to decoherence in quantum systems. Exploring and controlling such dissipative processes is both fundamentally intriguing and essential for harnessing quantum systems to achieve practical advantages and deeper insights. 
In this Letter, we first demonstrate the diffusive dynamics assisted by controlled dephasing noise in superconducting quan&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15571v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15571v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15571v1-abstract-full" style="display: none;"> Random fluctuations caused by environmental noise can lead to decoherence in quantum systems. Exploring and controlling such dissipative processes is both fundamentally intriguing and essential for harnessing quantum systems to achieve practical advantages and deeper insights. In this Letter, we first demonstrate the diffusive dynamics assisted by controlled dephasing noise in superconducting quantum circuits, contrasting with coherent evolution. We show that dephasing can enhance localization in a superconducting qubit array with quasiperiodic order, even in the regime where all eigenstates remain spatially extended for the coherent counterpart. Furthermore, by preparing different excitation distributions in the qubit array, we observe that a more localized initial state relaxes to a uniformly distributed mixed state faster with dephasing noise, illustrating another counterintuitive phenomenon called Mpemba effect, i.e., a far-from-equilibrium state can relax toward the equilibrium faster. These results deepen our understanding of diffusive dynamics at the microscopic level, and demonstrate controlled dissipative processes as a valuable tool for investigating Markovian open quantum systems. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15571v1-abstract-full').style.display = 'none'; document.getElementById('2411.15571v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7+10 pages, 4+9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15441">arXiv:2411.15441</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15441">pdf</a>, <a href="https://arxiv.org/format/2411.15441">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of $\itΛ_{\it{b}}^\rm{0}$ and $\itΞ_{\it{b}}^\rm{0}$ decays to $\itΛ h^+h^{&#39;-}$ and evidence for $CP$ violation in $\itΛ_{\it{b}}^\rm{0}\to\itΛ K^+K^-$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. 
Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1129 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15441v1-abstract-short" style="display: inline;"> A study of $\itΛ_{\it{b}}^\rm{0}$ and $\itΞ_{\it{b}}^\rm{0}$ decays to $\itΛ h^{+} h^{\prime -}$ $(h^{(\prime)}=π, K)$ is performed using $pp$ collision data collected by the LHCb experiment during LHC Runs 1$-$2, corresponding to an integrated luminosity of $9~\rm{fb}^{-1}$. 
The branching fractions for these decays are measured using the $\itΛ_{\it{b}}^\rm{0}\to\itΛ_{\it{c}}^+(\to\itΛπ^+)π^-$ dec&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15441v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15441v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15441v1-abstract-full" style="display: none;"> A study of $\itΛ_{\it{b}}^\rm{0}$ and $\itΞ_{\it{b}}^\rm{0}$ decays to $\itΛ h^{+} h^{\prime -}$ $(h^{(\prime)}=π, K)$ is performed using $pp$ collision data collected by the LHCb experiment during LHC Runs 1$-$2, corresponding to an integrated luminosity of $9~\rm{fb}^{-1}$. The branching fractions for these decays are measured using the $\itΛ_{\it{b}}^\rm{0}\to\itΛ_{\it{c}}^+(\to\itΛπ^+)π^-$ decay as control channel. The decays $\itΛ_{\it{b}}^\rm{0}\to\itΛπ^+π^-$ and $\itΞ_{\it{b}}^\rm{0}\to\itΛK^-π^+$ are observed for the first time. For decay modes with sufficient signal yields, $CP$ asymmetries are measured in the full and localized regions of the final-state phase space. Evidence is found for $CP$ violation in the $\itΛ_{\it{b}}^\rm{0}\to\itΛK^+K^-$ decay, interpreted as originating primarily from an asymmetric $\itΛ_{\it{b}}^\rm{0} \to \it{N}^{*+} \it{K}^-$ decay amplitude. The measured $CP$ asymmetries for the other decays are compatible with zero. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15441v1-abstract-full').style.display = 'none'; document.getElementById('2411.15441v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with any supplementary material and additional information, are available at https://cern.ch/lhcbproject/Publications/p/LHCb-PAPER-2024-043.html (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-043, CERN-EP-2024-281 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15251">arXiv:2411.15251</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15251">pdf</a>, <a href="https://arxiv.org/format/2411.15251">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Optimized Vessel Segmentation: A Structure-Agnostic Approach with Small Vessel Enhancement and Morphological Correction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Song%2C+D">Dongning Song</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weijian Huang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+J">Jiarun Liu</a>, <a href="/search/?searchtype=author&amp;query=Islam%2C+M+J">Md Jahidul Islam</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+H">Hao Yang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+S">Shanshan Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span 
class="abstract-short has-text-grey-dark mathjax" id="2411.15251v1-abstract-short" style="display: inline;"> Accurate segmentation of blood vessels is essential for various clinical assessments and postoperative analyses. However, the inherent challenges of vascular imaging, such as sparsity, fine granularity, low contrast, data distribution variability, and the critical need for preserving topological structure, making generalized vessel segmentation particularly complex. While specialized segmentation&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15251v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15251v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15251v1-abstract-full" style="display: none;"> Accurate segmentation of blood vessels is essential for various clinical assessments and postoperative analyses. However, the inherent challenges of vascular imaging, such as sparsity, fine granularity, low contrast, data distribution variability, and the critical need for preserving topological structure, making generalized vessel segmentation particularly complex. While specialized segmentation methods have been developed for specific anatomical regions, their over-reliance on tailored models hinders broader applicability and generalization. General-purpose segmentation models introduced in medical imaging often fail to address critical vascular characteristics, including the connectivity of segmentation results. To overcome these limitations, we propose an optimized vessel segmentation framework: a structure-agnostic approach incorporating small vessel enhancement and morphological correction for multi-modality vessel segmentation. 
To train and validate this framework, we compiled a comprehensive multi-modality dataset spanning 17 datasets and benchmarked our model against six SAM-based methods and 17 expert models. The results demonstrate that our approach achieves superior segmentation accuracy, generalization, and a 34.6% improvement in connectivity, underscoring its clinical potential. An ablation study further validates the effectiveness of the proposed improvements. We will release the code and dataset at github following the publication of this work. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15251v1-abstract-full').style.display = 'none'; document.getElementById('2411.15251v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 7 figures, submitted to TIP</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15076">arXiv:2411.15076</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15076">pdf</a>, <a href="https://arxiv.org/format/2411.15076">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> RankByGene: Gene-Guided Histopathology Representation Learning 
Through Cross-Modal Ranking Consistency </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wentao Huang</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+M">Meilong Xu</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+X">Xiaoling Hu</a>, <a href="/search/?searchtype=author&amp;query=Abousamra%2C+S">Shahira Abousamra</a>, <a href="/search/?searchtype=author&amp;query=Ganguly%2C+A">Aniruddha Ganguly</a>, <a href="/search/?searchtype=author&amp;query=Kapse%2C+S">Saarthak Kapse</a>, <a href="/search/?searchtype=author&amp;query=Yurovsky%2C+A">Alisa Yurovsky</a>, <a href="/search/?searchtype=author&amp;query=Prasanna%2C+P">Prateek Prasanna</a>, <a href="/search/?searchtype=author&amp;query=Kurc%2C+T">Tahsin Kurc</a>, <a href="/search/?searchtype=author&amp;query=Saltz%2C+J">Joel Saltz</a>, <a href="/search/?searchtype=author&amp;query=Miller%2C+M+L">Michael L. Miller</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+C">Chao Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15076v1-abstract-short" style="display: inline;"> Spatial transcriptomics (ST) provides essential spatial context by mapping gene expression within tissue, enabling detailed study of cellular heterogeneity and tissue organization. However, aligning ST data with histology images poses challenges due to inherent spatial distortions and modality-specific variations. 
Existing methods largely rely on direct alignment, which often fails to capture comp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15076v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15076v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15076v1-abstract-full" style="display: none;"> Spatial transcriptomics (ST) provides essential spatial context by mapping gene expression within tissue, enabling detailed study of cellular heterogeneity and tissue organization. However, aligning ST data with histology images poses challenges due to inherent spatial distortions and modality-specific variations. Existing methods largely rely on direct alignment, which often fails to capture complex cross-modal relationships. To address these limitations, we propose a novel framework that aligns gene and image features using a ranking-based alignment loss, preserving relative similarity across modalities and enabling robust multi-scale alignment. To further enhance the alignment&#39;s stability, we employ self-supervised knowledge distillation with a teacher-student network architecture, effectively mitigating disruptions from high dimensionality, sparsity, and noise in gene expression data. Extensive experiments on gene expression prediction and survival analysis demonstrate our framework&#39;s effectiveness, showing improved alignment and predictive performance over existing methods and establishing a robust tool for gene-guided image representation learning in digital pathology. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15076v1-abstract-full').style.display = 'none'; document.getElementById('2411.15076v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14882">arXiv:2411.14882</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14882">pdf</a>, <a href="https://arxiv.org/ps/2411.14882">ps</a>, <a href="https://arxiv.org/format/2411.14882">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Analysis of PDEs">math.AP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> </div> </div> <p class="title is-5 mathjax"> On Temporal Decay of Compressible Hookean Viscoelastic Fluids with Relatively Large Elasticity Coefficient </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Fu%2C+S">Shengbin Fu</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenting Huang</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+F">Fei Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14882v2-abstract-short" style="display: inline;"> Recently, Jiang--Jiang (J. 
Differential Equations 282, 2021) showed the existence of unique strong solutions in spatial periodic domain (denoted by $\mathbb{T}^3$), whenever the elasticity coefficient is larger than the initial velocity perturbation of the rest state. Motivated by Jiang--Jiang&#39;s result, we revisit the Cauchy problem of the compressible viscoelastic fluids in Lagrangian coordinates&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14882v2-abstract-full').style.display = 'inline'; document.getElementById('2411.14882v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14882v2-abstract-full" style="display: none;"> Recently, Jiang--Jiang (J. Differential Equations 282, 2021) showed the existence of unique strong solutions in spatial periodic domain (denoted by $\mathbb{T}^3$), whenever the elasticity coefficient is larger than the initial velocity perturbation of the rest state. Motivated by Jiang--Jiang&#39;s result, we revisit the Cauchy problem of the compressible viscoelastic fluids in Lagrangian coordinates. Employing an energy method with temporal weights and an additional asymptotic stability condition of initial density in Lagrangian coordinates, we extend the Jiang--Jiang&#39;s result with exponential decay-in-time in $\mathbb{T}^3$ to the one with algebraic decay-in-time in the whole space $\mathbb{R}^3$. Thanks to the algebraic decay of solutions established by the energy method with temporal weights, we can further use the spectral analysis to improve the temporal decay rate of solutions. In particular, we find that the $k$-th order spatial derivatives of both the density and deformation perturbations converge to zero in $L^2(\mathbb{R}^3)$ at a rate of $(1+t)^{-\frac{3}{4}-\frac{k+1}{2}}$, which is faster than the decay rate $(1 +t)^{-\frac{3}{4}-\frac{k}{2}}$ obtained by Hu--Wu (SIAM J. Math. Anal. 45, 2013) for $k=0$ and $ 1$. 
In addition, it&#39;s well-known that the decay rate $(1+t)^{-\frac{3}{4}-\frac{k}{2}}$ of the density perturbation is optimal in the compressible Navier--Stokes equations (A.~Matsumura, T.~Nishida, Proc. Jpn. Acad. Ser-A. 55, 1979). Therefore, our faster temporal decay rates indicate that the elasticity accelerates the decay of the density perturbation after the rest state of a compressible viscoelastic fluid being perturbed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14882v2-abstract-full').style.display = 'none'; document.getElementById('2411.14882v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14794">arXiv:2411.14794</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14794">pdf</a>, <a href="https://arxiv.org/format/2411.14794">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> VideoEspresso: A Large-Scale Chain-of-Thought Dataset for Fine-Grained Video Reasoning via Core Frame Selection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Han%2C+S">Songhao Han</a>, <a 
href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei Huang</a>, <a href="/search/?searchtype=author&amp;query=Shi%2C+H">Hairong Shi</a>, <a href="/search/?searchtype=author&amp;query=Zhuo%2C+L">Le Zhuo</a>, <a href="/search/?searchtype=author&amp;query=Su%2C+X">Xiu Su</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+S">Shifeng Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+X">Xu Zhou</a>, <a href="/search/?searchtype=author&amp;query=Qi%2C+X">Xiaojuan Qi</a>, <a href="/search/?searchtype=author&amp;query=Liao%2C+Y">Yue Liao</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+S">Si Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14794v1-abstract-short" style="display: inline;"> The advancement of Large Vision Language Models (LVLMs) has significantly improved multimodal understanding, yet challenges remain in video reasoning tasks due to the scarcity of high-quality, large-scale datasets. Existing video question-answering (VideoQA) datasets often rely on costly manual annotations with insufficient granularity or automatic construction methods with redundant frame-by-fram&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14794v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14794v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14794v1-abstract-full" style="display: none;"> The advancement of Large Vision Language Models (LVLMs) has significantly improved multimodal understanding, yet challenges remain in video reasoning tasks due to the scarcity of high-quality, large-scale datasets. 
Existing video question-answering (VideoQA) datasets often rely on costly manual annotations with insufficient granularity or automatic construction methods with redundant frame-by-frame analysis, limiting their scalability and effectiveness for complex reasoning. To address these challenges, we introduce VideoEspresso, a novel dataset that features VideoQA pairs preserving essential spatial details and temporal coherence, along with multimodal annotations of intermediate reasoning steps. Our construction pipeline employs a semantic-aware method to reduce redundancy, followed by generating QA pairs using GPT-4o. We further develop video Chain-of-Thought (CoT) annotations to enrich reasoning processes, guiding GPT-4o in extracting logical relationships from QA pairs and video content. To exploit the potential of high-quality VideoQA pairs, we propose a Hybrid LVLMs Collaboration framework, featuring a Frame Selector and a two-stage instruction fine-tuned reasoning LVLM. This framework adaptively selects core frames and performs CoT reasoning using multimodal evidence. Evaluated on our proposed benchmark with 14 tasks against 9 popular LVLMs, our method outperforms existing baselines on most tasks, demonstrating superior video reasoning capabilities. Our code and dataset will be released at: https://github.com/hshjerry/VideoEspresso <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14794v1-abstract-full').style.display = 'none'; document.getElementById('2411.14794v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 14 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14772">arXiv:2411.14772</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14772">pdf</a>, <a href="https://arxiv.org/format/2411.14772">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> Large-angle twisted photonic crystal semiconductor nanolasers with ultra-low thresholds operating in the C-band </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yilan Wang</a>, <a href="/search/?searchtype=author&amp;query=Tian%2C+F">Feng Tian</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wendi Huang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+T">Taojie Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14772v1-abstract-short" style="display: inline;"> Nanolasers, characterized by enhanced optical localization at subwavelength scale, have emerged as promising coherent light sources for ultra-compact, high-speed and energy-efficient photonic integrated circuits. 
Twisted photonic crystal nanocavity, constructed by stacking two layers of photonic crystal structure with a specified rotation angle, enables strong light confinement with an ultra-small&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14772v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14772v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14772v1-abstract-full" style="display: none;"> Nanolasers, characterized by enhanced optical localization at subwavelength scale, have emerged as promising coherent light sources for ultra-compact, high-speed and energy-efficient photonic integrated circuits. Twisted photonic crystal nanocavity, constructed by stacking two layers of photonic crystal structure with a specified rotation angle, enables strong light confinement with an ultra-small mode volume and an extremely high quality factor. The twisted angle can be randomly selected, providing the possibility of actively tuning the resonant wavelength and optical mode distribution within a nanoscale twisted cavity. Here, we demonstrate large-angle twisted single-mode photonic crystal nanolasers operating in the C-band with an exceptionally ultra-compact footprint of approximately 25 $\mu m^2$ and an ultra-small mode volume of 0.47 $(\lambda/n)^3$. The reported twisted photonic crystal nanolasers are optically pumped at room temperature with an ultra-low threshold of $\sim$ 1.25 $kW/cm^2$. Our work provides a prospective method for easily constructing robust nanolasers by twisting angles, and paves the way for achieving high-performance nanoscale coherent light sources for densely integrated photonic chips. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14772v1-abstract-full').style.display = 'none'; document.getElementById('2411.14772v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14588">arXiv:2411.14588</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14588">pdf</a>, <a href="https://arxiv.org/format/2411.14588">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Populations and Evolution">q-bio.PE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> Population dynamics of multiple ecDNA types </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Scanu%2C+E">Elisa Scanu</a>, <a href="/search/?searchtype=author&amp;query=Werner%2C+B">Benjamin Werner</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weini Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14588v1-abstract-short" style="display: inline;"> Extrachromosomal DNA (ecDNA) can drive oncogene amplification, gene expression and intratumor heterogeneity, representing a major force in cancer initiation and progression. The phenomenon becomes even more intricate as distinct types of ecDNA present within a single cancer cell. 
While exciting as a new and significant observation across various cancer types, there is a lack of a general framework&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14588v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14588v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14588v1-abstract-full" style="display: none;"> Extrachromosomal DNA (ecDNA) can drive oncogene amplification, gene expression and intratumor heterogeneity, representing a major force in cancer initiation and progression. The phenomenon becomes even more intricate as distinct types of ecDNA present within a single cancer cell. While exciting as a new and significant observation across various cancer types, there is a lack of a general framework capturing the dynamics of multiple ecDNA types theoretically. Here, we present novel mathematical models investigating the proliferation and expansion of multiple ecDNA types in a growing cell population. By switching on and off a single parameter, we model different scenarios including ecDNA species with different oncogenes, genotypes with same oncogenes but different point mutations and phenotypes with identical genetic compositions but different functions. We analyse the fraction of ecDNA-positive and free cells as well as how the mean and variance of the copy number of cells carrying one or more ecDNA types change over time. Our results showed that switching does not play a role in the fraction and copy number distribution of total ecDNA-positive cells, if selection is identical among different ecDNA types. In addition, while cells with multiple ecDNA cannot be maintained in the scenario of ecDNA species without extra fitness advantages, they can persist and even dominate the ecDNA-positive population if switching is possible. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14588v1-abstract-full').style.display = 'none'; document.getElementById('2411.14588v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">32 pages, 5 main figures, 7 supplementary figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14164">arXiv:2411.14164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14164">pdf</a>, <a href="https://arxiv.org/format/2411.14164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> FoPru: Focal Pruning for Efficient Large Vision-Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Jiang%2C+L">Lei Jiang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weizhe Huang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+T">Tongxuan Liu</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+Y">Yuting Zeng</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jing Li</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+L">Lechao Cheng</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+X">Xiaohua Xu</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14164v1-abstract-short" style="display: inline;"> Large Vision-Language Models (LVLMs) represent a significant advancement toward achieving superior multimodal capabilities by enabling powerful Large Language Models (LLMs) to understand visual input. Typically, LVLMs utilize visual encoders, such as CLIP, to transform images into visual tokens, which are then aligned with textual tokens through projection layers before being input into the LLM fo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14164v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14164v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14164v1-abstract-full" style="display: none;"> Large Vision-Language Models (LVLMs) represent a significant advancement toward achieving superior multimodal capabilities by enabling powerful Large Language Models (LLMs) to understand visual input. Typically, LVLMs utilize visual encoders, such as CLIP, to transform images into visual tokens, which are then aligned with textual tokens through projection layers before being input into the LLM for inference. Although existing LVLMs have achieved significant success, their inference efficiency is still limited by the substantial number of visual tokens and the potential redundancy among them. To mitigate this issue, we propose Focal Pruning (FoPru), a training-free method that prunes visual tokens based on the attention-based token significance derived from the vision encoder. 
Specifically, we introduce two alternative pruning strategies: 1) the rank strategy, which leverages all token significance scores to retain more critical tokens in a global view; 2) the row strategy, which focuses on preserving continuous key information in images from a local perspective. Finally, the selected tokens are reordered to maintain their original positional relationships. Extensive experiments across various LVLMs and multimodal datasets demonstrate that our method can prune a large number of redundant tokens while maintaining high accuracy, leading to significant improvements in inference efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14164v1-abstract-full').style.display = 'none'; document.getElementById('2411.14164v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13971">arXiv:2411.13971</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13971">pdf</a>, <a href="https://arxiv.org/format/2411.13971">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Soft Condensed Matter">cond-mat.soft</span> </div> </div> <p class="title is-5 mathjax"> How do imperfections cause asymmetry in elastic snap-through? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Giudici%2C+A">Andrea Giudici</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weicheng Huang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Q">Qiong Wang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yuzhe Wang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+M">Mingchao Liu</a>, <a href="/search/?searchtype=author&amp;query=Tawfick%2C+S">Sameh Tawfick</a>, <a href="/search/?searchtype=author&amp;query=Vella%2C+D">Dominic Vella</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13971v1-abstract-short" style="display: inline;"> A symmetrically-buckled arch whose boundaries are clamped at an angle has two stable equilibria: an inverted and a natural state. When the distance between the clamps is increased (i.e. the confinement is decreased) the system snaps from the inverted to the natural state. Depending on the rate at which the confinement is decreased (&#39;unloading&#39;), the symmetry of the system during snap-through may c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13971v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13971v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13971v1-abstract-full" style="display: none;"> A symmetrically-buckled arch whose boundaries are clamped at an angle has two stable equilibria: an inverted and a natural state. When the distance between the clamps is increased (i.e. the confinement is decreased) the system snaps from the inverted to the natural state. 
Depending on the rate at which the confinement is decreased (&#39;unloading&#39;), the symmetry of the system during snap-through may change: slow unloading results in snap-through occurring asymmetrically, while fast unloading results in a symmetric snap-through. It has recently been shown [Wang et al., Phys. Rev. Lett. 132, 267201 (2024)] that the transient asymmetry at slow unloading rates is the result of the amplification of small asymmetric precursor oscillations (shape perturbations) introduced dynamically to the system, even when the system itself is perfectly symmetric. In reality, however, imperfections, such as small asymmetries in the boundary conditions, are present too. Using numerical simulations and a simple toy model, we discuss the relative importance of intrinsic imperfections and initial asymmetric shape perturbations in determining the transient asymmetry observed. We show that, for small initial perturbations, the magnitude of the asymmetry grows in proportion to the size of the intrinsic imperfection but that, when initial shape perturbations are large, intrinsic imperfections are unimportant - the asymmetry of the system is dominated by the transient amplification of the initial asymmetric shape perturbations. We also show that the dominant origin of asymmetry changes the way that asymmetry grows dynamically. Our results may guide engineering and design of snapping beams used to control insect-sized jumping robots. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13971v1-abstract-full').style.display = 'none'; document.getElementById('2411.13971v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13042">arXiv:2411.13042</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13042">pdf</a>, <a href="https://arxiv.org/format/2411.13042">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Attentive Contextual Attention for Cloud Removal </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenli Huang</a>, <a href="/search/?searchtype=author&amp;query=Deng%2C+Y">Ye Deng</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Y">Yang Wu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+J">Jinjun Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13042v1-abstract-short" style="display: inline;"> Cloud cover can significantly hinder the use of remote sensing images for Earth observation, prompting urgent advancements in cloud removal technology. Recently, deep learning strategies have shown strong potential in restoring cloud-obscured areas. 
These methods utilize convolution to extract intricate local features and attention mechanisms to gather long-range information, improving the overall&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13042v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13042v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13042v1-abstract-full" style="display: none;"> Cloud cover can significantly hinder the use of remote sensing images for Earth observation, prompting urgent advancements in cloud removal technology. Recently, deep learning strategies have shown strong potential in restoring cloud-obscured areas. These methods utilize convolution to extract intricate local features and attention mechanisms to gather long-range information, improving the overall comprehension of the scene. However, a common drawback of these approaches is that the resulting images often suffer from blurriness, artifacts, and inconsistencies. This is partly because attention mechanisms apply weights to all features based on generalized similarity scores, which can inadvertently introduce noise and irrelevant details from cloud-covered areas. To overcome this limitation and better capture relevant distant context, we introduce a novel approach named Attentive Contextual Attention (AC-Attention). This method enhances conventional attention mechanisms by dynamically learning data-driven attentive selection scores, enabling it to filter out noise and irrelevant features effectively. By integrating the AC-Attention module into the DSen2-CR cloud removal framework, we significantly improve the model&#39;s ability to capture essential distant information, leading to more effective cloud removal. Our extensive evaluation of various datasets shows that our method outperforms existing ones regarding image reconstruction quality. 
Additionally, we conducted ablation studies by integrating AC-Attention into multiple existing methods and widely used network architectures. These studies demonstrate the effectiveness and adaptability of AC-Attention and reveal its ability to focus on relevant features, thereby improving the overall performance of the networks. The code is available at \url{https://github.com/huangwenwenlili/ACA-CRNet}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13042v1-abstract-full').style.display = 'none'; document.getElementById('2411.13042v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12402">arXiv:2411.12402</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12402">pdf</a>, <a href="https://arxiv.org/ps/2411.12402">ps</a>, <a href="https://arxiv.org/format/2411.12402">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Fluid Dynamics">physics.flu-dyn</span> </div> </div> <p class="title is-5 mathjax"> A total-shear-stress-conserved wall model for large-eddy simulation of high-Reynolds number wall turbulence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+H">Huan-Cong Liu</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+C">Chun-Xiao Xu</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei-Xi Huang</a> 
</p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12402v1-abstract-short" style="display: inline;"> Wall-modeled large-eddy simulation (WMLES) is widely recognized as a useful method for simulation of turbulent flows at high Reynolds numbers. Nevertheless, a continual issue in different wall models is the shift of the mean velocity profile from the wall-model/RANS (Reynolds-averaged Navier-Stokes) region to the LES region. This phenomenon, referred to as logarithmic layer mismatch (LLM), occurs&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12402v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12402v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12402v1-abstract-full" style="display: none;"> Wall-modeled large-eddy simulation (WMLES) is widely recognized as a useful method for simulation of turbulent flows at high Reynolds numbers. Nevertheless, a continual issue in different wall models is the shift of the mean velocity profile from the wall-model/RANS (Reynolds-averaged Navier-Stokes) region to the LES region. This phenomenon, referred to as logarithmic layer mismatch (LLM), occurs in both wall shear stress models and hybrid RANS/LES models. Many efforts have been made to explain and resolve this mismatch, including decreasing the high correlation between the wall shear stress and the velocity at the matching layer, modifying the subgrid-scale (SGS) eddy viscosity, and adding a stochastic forcing. 
It is widely believed that the inclusion of the resolved Reynolds shear stress (or the convection term) is essential to eliminate the LLM, as it prevents the overestimation of the modeled Reynolds shear stress and promotes the generation of the small-scale flow structures in the near-wall region. In this work, by comparing three different SGS eddy viscosity models, we demonstrate that ensuring the total shear stress conservation (TSSC) is key to resolving the LLM. Under the TSSC framework, the effect of the convection term on LLM can be quantitatively assessed. Furthermore, a modified SGS eddy viscosity model that adheres to the TSSC constraint is tested at different Reynolds numbers ($Re_τ=1000, 2000, 4200$). Our results demonstrate the robust performance of the present model in predicting skin friction and low-order turbulence statistics, even under a relatively low grid resolution ($Δx^+, Δz^+ \lesssim 500$, $2\leq Δ_x/Δ_{y,mat} \leq 4$, where $Δ_{y,mat}$ is the wall-normal grid spacing in the wall-model region). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12402v1-abstract-full').style.display = 'none'; document.getElementById('2411.12402v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12178">arXiv:2411.12178</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12178">pdf</a>, <a href="https://arxiv.org/format/2411.12178">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> First evidence for direct CP violation in beauty to charmonium decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. 
Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12178v2-abstract-short" style="display: inline;"> The $C\!P$ asymmetry and branching fraction of the CKM-suppressed decay $B^+\!\to J\mskip -3mu/\mskip -2mu蠄\,蟺^+$ are precisely measured relative to the favoured decay $B^+\!\to J\mskip -3mu/\mskip -2mu蠄\,K^+$, using a sample of proton-proton collision data corresponding to an integrated luminosity of $5.4~\mathrm{fb}^{-1}$ recorded at center-of-mass energy of $13~\mathrm{TeV}$ during 2016--2018.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12178v2-abstract-full').style.display = 'inline'; document.getElementById('2411.12178v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12178v2-abstract-full" style="display: none;"> The $C\!P$ asymmetry and branching fraction of the CKM-suppressed decay $B^+\!\to J\mskip -3mu/\mskip -2mu蠄\,蟺^+$ are precisely measured relative to the favoured decay $B^+\!\to J\mskip -3mu/\mskip -2mu蠄\,K^+$, using a sample of proton-proton collision data corresponding to an integrated luminosity of $5.4~\mathrm{fb}^{-1}$ recorded at 
center-of-mass energy of $13~\mathrm{TeV}$ during 2016--2018. The results of the $C\!P$ asymmetry difference and branching fraction ratio are \begin{align*} Δ\mathcal{A}^{C\!P} &amp;\equiv \mathcal{A}^{C\!P}(B^+ \to J\mskip -3mu/\mskip -2muψ\,π^+) - \mathcal{A}^{C\!P}(B^+ \to J\mskip -3mu/\mskip -2muψ\,K^+) = (1.29 \pm 0.49 \pm 0.08) \times 10^{-2}, \end{align*} \begin{equation*} \mathcal{R}_{π/K} \equiv \frac{\mathcal{B}(B^+ \!\to J\mskip -3mu/\mskip -2muψ\,π^+)}{\mathcal{B}(B^+ \!\to J\mskip -3mu/\mskip -2muψ\,K^+)} = (3.852 \pm 0.022 \pm 0.018) \times 10^{-2}. \end{equation*} where the first uncertainties are statistical and the second systematic. A combination with previous LHCb results based on data collected at $7$ and $8~\mathrm{TeV}$ in 2011 and 2012 yields $Δ\mathcal{A}^{C\!P} = (1.42 \pm 0.43 \pm 0.08) \times 10^{-2}$ and $\mathcal{R}_{π/K} = (3.846 \pm 0.018 \pm 0.018) \times 10^{-2}$. The combined $Δ\mathcal{A}^{C\!P}$ value deviates from zero by 3.2 standard deviations, providing the first evidence for direct $C\!P$ violation in the amplitudes of beauty decays to charmonium final states. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12178v2-abstract-full').style.display = 'none'; document.getElementById('2411.12178v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 2 figures, no conference or journal information All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/1623/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-031 CERN-EP-2024-286 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12096">arXiv:2411.12096</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12096">pdf</a>, <a href="https://arxiv.org/format/2411.12096">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> </div> </div> <p class="title is-5 mathjax"> Cartesian Atomic Moment Machine Learning Interatomic Potentials </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wen%2C+M">Mingjian Wen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei-Fan Huang</a>, <a href="/search/?searchtype=author&amp;query=Dai%2C+J">Jin Dai</a>, <a href="/search/?searchtype=author&amp;query=Adhikari%2C+S">Santosh Adhikari</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12096v1-abstract-short" style="display: inline;"> Machine learning interatomic potentials (MLIPs) have substantially advanced atomistic simulations in materials science and chemistry by providing a compelling balance between accuracy and computational efficiency. 
While leading MLIPs rely on representations of atomic environments using spherical tensors, Cartesian representations offer potential advantages in simplicity and efficiency. In this wor&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12096v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12096v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12096v1-abstract-full" style="display: none;"> Machine learning interatomic potentials (MLIPs) have substantially advanced atomistic simulations in materials science and chemistry by providing a compelling balance between accuracy and computational efficiency. While leading MLIPs rely on representations of atomic environments using spherical tensors, Cartesian representations offer potential advantages in simplicity and efficiency. In this work, we introduce Cartesian Atomic Moment Potentials (CAMP), an approach equivalent to models based on spherical tensors but operating entirely in the Cartesian space. CAMP constructs atomic moment tensors from neighboring atoms and combines these through tensor products to incorporate higher body-order interactions, which can provide a complete description of local atomic environments. By integrating these into a graph neural network (GNN) framework, CAMP enables physically-motivated and systematically improvable potentials. It requires minimal hyperparameter tuning that simplifies the training process. The model demonstrates excellent performance across diverse systems, including periodic structures, small organic molecules, and two-dimensional materials. It achieves accuracy, efficiency, and stability in molecular dynamics simulations surpassing or comparable to current leading models. 
By combining the strengths of Cartesian representations with the expressiveness of GNNs, CAMP provides a powerful tool for atomistic simulations to accelerate materials understanding and discovery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12096v1-abstract-full').style.display = 'none'; document.getElementById('2411.12096v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11930">arXiv:2411.11930</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11930">pdf</a>, <a href="https://arxiv.org/format/2411.11930">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> AtomThink: A Slow Thinking Framework for Multimodal Mathematical Reasoning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xiang%2C+K">Kun Xiang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Z">Zhili Liu</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+Z">Zihao Jiang</a>, <a href="/search/?searchtype=author&amp;query=Nie%2C+Y">Yunshuang Nie</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+R">Runhui Huang</a>, <a href="/search/?searchtype=author&amp;query=Fan%2C+H">Haoxiang Fan</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+H">Hanhui Li</a>, <a 
href="/search/?searchtype=author&amp;query=Huang%2C+W">Weiran Huang</a>, <a href="/search/?searchtype=author&amp;query=Zeng%2C+Y">Yihan Zeng</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+J">Jianhua Han</a>, <a href="/search/?searchtype=author&amp;query=Hong%2C+L">Lanqing Hong</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+H">Hang Xu</a>, <a href="/search/?searchtype=author&amp;query=Liang%2C+X">Xiaodan Liang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11930v2-abstract-short" style="display: inline;"> In this paper, we address the challenging task of multimodal mathematical reasoning by incorporating the ability of ``slow thinking&#34; into multimodal large language models (MLLMs). Contrary to existing methods that rely on direct or fast thinking, our key idea is to construct long chains of thought (CoT) consisting of atomic actions in a step-by-step manner, guiding MLLMs to perform complex reasoni&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11930v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11930v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11930v2-abstract-full" style="display: none;"> In this paper, we address the challenging task of multimodal mathematical reasoning by incorporating the ability of ``slow thinking&#34; into multimodal large language models (MLLMs). Contrary to existing methods that rely on direct or fast thinking, our key idea is to construct long chains of thought (CoT) consisting of atomic actions in a step-by-step manner, guiding MLLMs to perform complex reasoning. 
To this end, we design a novel AtomThink framework composed of three key modules: (i) a CoT annotation engine that automatically generates high-quality CoT annotations to address the lack of high-quality visual mathematical data; (ii) an atomic step fine-tuning strategy that jointly optimizes an MLLM and a policy reward model (PRM) for step-wise reasoning; and (iii) four different search strategies that can be applied with the PRM to complete reasoning. Additionally, we propose AtomMATH, a large-scale multimodal dataset of long CoTs, and an atomic capability evaluation metric for mathematical tasks. Extensive experimental results show that the proposed AtomThink significantly improves the performance of baseline MLLMs, achieving approximately 50\% relative accuracy gains on MathVista and 120\% on MathVerse. To support the advancement of multimodal slow-thinking models, we will make our code and dataset publicly available on https://github.com/Quinn777/AtomThink. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11930v2-abstract-full').style.display = 'none'; document.getElementById('2411.11930v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11009">arXiv:2411.11009</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11009">pdf</a>, <a href="https://arxiv.org/format/2411.11009">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> Ultra-compact topological photonic crystal rainbow nanolasers operating in the 1550 nm telecom band with wavelength-scale mode volumes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Tian%2C+F">Feng Tian</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yilan Wang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wendi Huang</a>, <a href="/search/?searchtype=author&amp;query=Fang%2C+X">Xuan Fang</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+S">Shengqun Guo</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+T">Taojie Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11009v1-abstract-short" style="display: inline;"> Density-integrated, multi-wavelength nanoscale lasers with ultra-low power consumption and ultra-compact footprints are essential for energy-efficient, fast and high-throughput data processing. Currently, on-chip multi-wavelength lasers predominantly rely on arrays of discrete large-scale conventional semiconductor lasers that are susceptible to the fabrication imperfections. 
Topological rainbow n&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11009v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11009v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11009v1-abstract-full" style="display: none;"> Density-integrated, multi-wavelength nanoscale lasers with ultra-low power consumption and ultra-compact footprints are essential for energy-efficient, fast and high-throughput data processing. Currently, on-chip multi-wavelength lasers predominantly rely on arrays of discrete large-scale conventional semiconductor lasers that are susceptible to the fabrication imperfections. Topological rainbow nanolasers, which spatially confine and emit specific topologically protected light frequencies, offer a prospective approach for achieving ultra-compact integrated multi-wavelength light sources with enhanced robustness against perturbations and defects. However, it remains a significant challenge to achieve highly localized topological rainbow trapping in nanocavities for laser emission with both high quality factors and ultra-small mode volumes. Here, we experimentally report ultra-compact topological photonic crystal rainbow nanolasers operating in the 1550 nm telecom band. Specifically, we present rainbow-like emission with uniform wavelength spacing and wavelength-scale mode volume $\sim 0.7 \left(\frac{λ}{n}\right)^3$ in a one-dimensional topological rainbow nanolaser, exhibiting robust lasing operation across a wide temperature range and a spectral tuning capability of approximately 70 nm. Additionally, we demonstrate an ultra-compact two-dimensional topological rainbow nanolaser in an exceptionally compact footprint of nearly 0.002 $\text{mm}^2$, featuring a broad rainbow spectrum with 64 continuously tuned lasing peaks. 
Our work provides a promising method for realizing robust and nanoscale multi-wavelength tunable laser sources, paving the way for numerous potential applications in ultra-compact photonic chips. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11009v1-abstract-full').style.display = 'none'; document.getElementById('2411.11009v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10932">arXiv:2411.10932</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10932">pdf</a>, <a href="https://arxiv.org/format/2411.10932">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Constrained Diffusion with Trust Sampling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">William Huang</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+Y">Yifeng Jiang</a>, <a href="/search/?searchtype=author&amp;query=Van+Wouwe%2C+T">Tom Van Wouwe</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+C+K">C. 
Karen Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10932v1-abstract-short" style="display: inline;"> Diffusion models have demonstrated significant promise in various generative tasks; however, they often struggle to satisfy challenging constraints. Our approach addresses this limitation by rethinking training-free loss-guided diffusion from an optimization perspective. We formulate a series of constrained optimizations throughout the inference process of a diffusion model. In each optimization,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10932v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10932v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10932v1-abstract-full" style="display: none;"> Diffusion models have demonstrated significant promise in various generative tasks; however, they often struggle to satisfy challenging constraints. Our approach addresses this limitation by rethinking training-free loss-guided diffusion from an optimization perspective. We formulate a series of constrained optimizations throughout the inference process of a diffusion model. In each optimization, we allow the sample to take multiple steps along the gradient of the proxy constraint function until we can no longer trust the proxy, according to the variance at each diffusion level. Additionally, we estimate the state manifold of diffusion model to allow for early termination when the sample starts to wander away from the state manifold at each diffusion step. Trust sampling effectively balances between following the unconditional diffusion model and adhering to the loss guidance, enabling more flexible and accurate constrained generation. 
We demonstrate the efficacy of our method through extensive experiments on complex tasks, and in drastically different domains of images and 3D motion generation, showing significant improvements over existing methods in terms of generation quality. Our implementation is available at https://github.com/will-s-h/trust-sampling. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10932v1-abstract-full').style.display = 'none'; document.getElementById('2411.10932v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 6 figures, NeurIPS</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10928">arXiv:2411.10928</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10928">pdf</a>, <a href="https://arxiv.org/format/2411.10928">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Learn from Downstream and Be Yourself in Multimodal Large Language Model Fine-Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenke Huang</a>, <a href="/search/?searchtype=author&amp;query=Liang%2C+J">Jian Liang</a>, <a href="/search/?searchtype=author&amp;query=Shi%2C+Z">Zekun Shi</a>, <a 
href="/search/?searchtype=author&amp;query=Zhu%2C+D">Didi Zhu</a>, <a href="/search/?searchtype=author&amp;query=Wan%2C+G">Guancheng Wan</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+H">He Li</a>, <a href="/search/?searchtype=author&amp;query=Du%2C+B">Bo Du</a>, <a href="/search/?searchtype=author&amp;query=Tao%2C+D">Dacheng Tao</a>, <a href="/search/?searchtype=author&amp;query=Ye%2C+M">Mang Ye</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10928v1-abstract-short" style="display: inline;"> Multimodal Large Language Model (MLLM) have demonstrated strong generalization capabilities across diverse distributions and tasks, largely due to extensive pre-training datasets. Fine-tuning MLLM has become a common practice to improve performance on specific downstream tasks. However, during fine-tuning, MLLM often faces the risk of forgetting knowledge acquired during pre-training, which can re&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10928v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10928v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10928v1-abstract-full" style="display: none;"> Multimodal Large Language Model (MLLM) have demonstrated strong generalization capabilities across diverse distributions and tasks, largely due to extensive pre-training datasets. Fine-tuning MLLM has become a common practice to improve performance on specific downstream tasks. However, during fine-tuning, MLLM often faces the risk of forgetting knowledge acquired during pre-training, which can result in a decline in generalization abilities. 
To balance the trade-off between generalization and specialization, we propose measuring the parameter importance for both pre-trained and fine-tuning distributions, based on frozen pre-trained weight magnitude and accumulated fine-tuning gradient values. We further apply an importance-aware weight allocation strategy, selectively updating relatively important parameters for downstream tasks. We conduct empirical evaluations on both image captioning and visual question-answering tasks using various MLLM architectures. The comprehensive experimental analysis demonstrates the effectiveness of the proposed solution, highlighting the efficiency of the crucial modules in enhancing downstream specialization performance while mitigating generalization degradation in MLLM Fine-Tuning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10928v1-abstract-full').style.display = 'none'; document.getElementById('2411.10928v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10752">arXiv:2411.10752</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10752">pdf</a>, <a href="https://arxiv.org/format/2411.10752">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Towards a Comprehensive Benchmark for Pathological Lymph Node Metastasis in Breast Cancer Sections </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ling%2C+X">Xitong Ling</a>, <a href="/search/?searchtype=author&amp;query=Lei%2C+Y">Yuanyuan Lei</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jiawen Li</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+J">Junru Cheng</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenting Huang</a>, <a href="/search/?searchtype=author&amp;query=Guan%2C+T">Tian Guan</a>, <a href="/search/?searchtype=author&amp;query=Guan%2C+J">Jian Guan</a>, <a href="/search/?searchtype=author&amp;query=He%2C+Y">Yonghong He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10752v1-abstract-short" style="display: inline;"> Advances in optical microscopy scanning have significantly contributed to computational pathology (CPath) by converting traditional histopathological slides into whole slide images (WSIs). This development enables comprehensive digital reviews by pathologists and accelerates AI-driven diagnostic support for WSI analysis. 
Recent advances in foundational pathology models have increased the need for&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10752v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10752v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10752v1-abstract-full" style="display: none;"> Advances in optical microscopy scanning have significantly contributed to computational pathology (CPath) by converting traditional histopathological slides into whole slide images (WSIs). This development enables comprehensive digital reviews by pathologists and accelerates AI-driven diagnostic support for WSI analysis. Recent advances in foundational pathology models have increased the need for benchmarking tasks. The Camelyon series is one of the most widely used open-source datasets in computational pathology. However, the quality, accessibility, and clinical relevance of the labels have not been comprehensively evaluated. In this study, we reprocessed 1,399 WSIs and labels from the Camelyon-16 and Camelyon-17 datasets, removing low-quality slides, correcting erroneous labels, and providing expert pixel annotations for tumor regions in the previously unreleased test set. Based on the sizes of re-annotated tumor regions, we upgraded the binary cancer screening task to a four-class task: negative, micro-metastasis, macro-metastasis, and Isolated Tumor Cells (ITC). We reevaluated pre-trained pathology feature extractors and multiple instance learning (MIL) methods using the cleaned dataset, providing a benchmark that advances AI development in histopathology. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10752v1-abstract-full').style.display = 'none'; document.getElementById('2411.10752v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10584">arXiv:2411.10584</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10584">pdf</a>, <a href="https://arxiv.org/format/2411.10584">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="General Economics">econ.GN</span> </div> </div> <p class="title is-5 mathjax"> Social Learning in Lung Transplant Decision </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Doval%2C+L">Laura Doval</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+F+E+W">Federico Echenique Wanying Huang</a>, <a href="/search/?searchtype=author&amp;query=Xin%2C+Y">Yi Xin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10584v1-abstract-short" style="display: inline;"> We study the allocation of deceased-donor lungs to patients in need of a transplant. Patients make sequential decisions in an order dictated by a priority policy. 
Using data from a prominent Organ Procurement Organization in the United States, we provide reduced-form evidence of social learning: because patients accept or reject organs in sequence, their decisions exhibit herding behavior, often r&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10584v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10584v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10584v1-abstract-full" style="display: none;"> We study the allocation of deceased-donor lungs to patients in need of a transplant. Patients make sequential decisions in an order dictated by a priority policy. Using data from a prominent Organ Procurement Organization in the United States, we provide reduced-form evidence of social learning: because patients accept or reject organs in sequence, their decisions exhibit herding behavior, often rejecting an organ that would otherwise be accepted. We develop and estimate a structural model to quantify the impact of various policy proposals and informational regimes. Our results show that blinding patients to their position in the sequence\textemdash thereby eliminating social learning\textemdash boosts organ allocation but reduces average utility per patient. In contrast, prioritizing patients by their likelihood of acceptance exacerbates social learning, leading to fewer organ allocations. Nevertheless, it raises utility per accepted organ and expedites the allocation process. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10584v1-abstract-full').style.display = 'none'; document.getElementById('2411.10584v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10219">arXiv:2411.10219</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10219">pdf</a>, <a href="https://arxiv.org/format/2411.10219">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Constraints on the photon polarisation in $b \to s 纬$ transitions using $B_s^0 \rightarrow 蠁e^+e^-$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. 
Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1120 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10219v2-abstract-short" style="display: inline;"> An angular analysis of the $B_s^0 \rightarrow φe^+e^-$ decay is performed using the proton-proton collision dataset collected between 2011 and 2018 by the LHCb experiment, corresponding to an integrated luminosity of $9\,{\rm fb}^{-1}$ at centre-of-mass energies of 7, 8 and $13\,{\rm TeV}$. 
The analysis is performed in the very low dielectron invariant mass-squared region between $0.0009$ and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10219v2-abstract-full').style.display = 'inline'; document.getElementById('2411.10219v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10219v2-abstract-full" style="display: none;"> An angular analysis of the $B_s^0 \rightarrow φe^+e^-$ decay is performed using the proton-proton collision dataset collected between 2011 and 2018 by the LHCb experiment, corresponding to an integrated luminosity of $9\,{\rm fb}^{-1}$ at centre-of-mass energies of 7, 8 and $13\,{\rm TeV}$. The analysis is performed in the very low dielectron invariant mass-squared region between $0.0009$ and $0.2615\,{\rm GeV}^2\!/c^4$. The longitudinal polarisation fraction of the $φ$ meson is measured to be less than $11.5\%$ at $90\%$ confidence level. The $A_{\mathrm{T}}^{\mathcal{R}e C\!P}$ observable, which is related to the lepton forward-backward asymmetry, is measured to be $0.116 \pm 0.155 \pm 0.006$, where the first uncertainty is statistical and the second systematic. The transverse asymmetries, $A_{\mathrm{T}}^{(2)}$ and $A_{\mathrm{T}}^{\mathcal{I}m C\!P}$ , which are sensitive to the virtual photon polarisation, are found to be $-0.045 \pm 0.235 \pm 0.014$ and $0.002 \pm 0.247 \pm 0.016$, respectively. The results are consistent with Standard Model predictions. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10219v2-abstract-full').style.display = 'none'; document.getElementById('2411.10219v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 4 figures. All figures and tables, along with any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3433/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-030, CERN-EP-2024-276 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09343">arXiv:2411.09343</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09343">pdf</a>, <a href="https://arxiv.org/format/2411.09343">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Nuclear Experiment">nucl-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of $蠁(1020)$ meson production in fixed-target $\textit{p}$Ne collisions at $\sqrt{s_{NN}}$ = 68.5 GeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a 
href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. 
(1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09343v1-abstract-short" style="display: inline;"> The first measurement of $φ(1020)$ meson production in fixed-target $p$Ne collisions at $\sqrt{s_{NN}}=68.5$ GeV is presented. The $φ(1020)$ mesons are reconstructed in their $K^{+}K^{-}$ decay in a data sample consisting of proton collisions on neon nuclei at rest, corresponding to an integrated luminosity of $21.7 \pm 1.4$ nb$^{-1}$, collected by the LHCb detector at CERN. The $φ(1020)$ producti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09343v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09343v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09343v1-abstract-full" style="display: none;"> The first measurement of $φ(1020)$ meson production in fixed-target $p$Ne collisions at $\sqrt{s_{NN}}=68.5$ GeV is presented. The $φ(1020)$ mesons are reconstructed in their $K^{+}K^{-}$ decay in a data sample consisting of proton collisions on neon nuclei at rest, corresponding to an integrated luminosity of $21.7 \pm 1.4$ nb$^{-1}$, collected by the LHCb detector at CERN. The $φ(1020)$ production cross-section in the centre-of-mass rapidity range of $-1.8&lt;y^*&lt;0$ and transverse momentum range of $800&lt;p_{T}&lt;6500$ MeV/c is found to be $σ=182.7\pm2.7~\text{(stat.)}\pm14.1~\text{(syst)}~μ$b/nucleon. A double-differential measurement of the cross-section is also provided in four regions of rapidity and six regions of transverse momentum of the $φ(1020)$ meson and compared with the predictions from Pythia and EPOS4, which are found to underestimate the experimental values. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09343v1-abstract-full').style.display = 'none'; document.getElementById('2411.09343v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3673/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-036, CERN-EP-2024-274 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09121">arXiv:2411.09121</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09121">pdf</a>, <a href="https://arxiv.org/format/2411.09121">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Logic in Computer Science">cs.LO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Formal Languages and Automata Theory">cs.FL</span> </div> </div> <p class="title is-5 mathjax"> AutoQ 2.0: From Verification of Quantum Circuits to Verification of Quantum Programs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yu-Fang Chen</a>, <a href="/search/?searchtype=author&amp;query=Chung%2C+K">Kai-Min Chung</a>, <a href="/search/?searchtype=author&amp;query=Hsieh%2C+M">Min-Hsiu 
Hsieh</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei-Jia Huang</a>, <a href="/search/?searchtype=author&amp;query=Leng%C3%A1l%2C+O">Ondřej Lengál</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+J">Jyun-Ao Lin</a>, <a href="/search/?searchtype=author&amp;query=Tsai%2C+W">Wei-Lun Tsai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09121v1-abstract-short" style="display: inline;"> We present a verifier of quantum programs called AutoQ 2.0. Quantum programs extend quantum circuits (the domain of AutoQ 1.0) by classical control flow constructs, which enable users to describe advanced quantum algorithms in a formal and precise manner. The extension is highly non-trivial, as we needed to tackle both theoretical challenges (such as the treatment of measurement, the normalization&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09121v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09121v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09121v1-abstract-full" style="display: none;"> We present a verifier of quantum programs called AutoQ 2.0. Quantum programs extend quantum circuits (the domain of AutoQ 1.0) by classical control flow constructs, which enable users to describe advanced quantum algorithms in a formal and precise manner. The extension is highly non-trivial, as we needed to tackle both theoretical challenges (such as the treatment of measurement, the normalization problem, and lifting techniques for verification of classical programs with loops to the quantum world), and engineering issues (such as extending the input format with a~support for specifying loop invariants). 
We have successfully used AutoQ 2.0 to verify two types of advanced quantum programs that cannot be expressed using only quantum circuits: the \emph{repeat-until-success} (RUS) algorithm and the weak-measurement-based version of Grover&#39;s search algorithm. AutoQ 2.0 can efficiently verify all our benchmarks: all RUS algorithms were verified instantly and, for the weak-measurement-based version of Grover&#39;s search, we were able to handle the case of 100 qubits in $\sim$20 minutes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09121v1-abstract-full').style.display = 'none'; document.getElementById('2411.09121v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">regular tool paper submitted to TACAS 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08997">arXiv:2411.08997</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08997">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Fluid Dynamics">physics.flu-dyn</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biological Physics">physics.bio-ph</span> </div> </div> <p class="title is-5 mathjax"> Aerodynamic Significance of Mass Distribution on Samara Descent </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Hou%2C+Z">Zhao-Bang Hou</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jun-Duo Zhang</a>, <a 
href="/search/?searchtype=author&amp;query=Li%2C+Y">Yun-Da Li</a>, <a href="/search/?searchtype=author&amp;query=Jia%2C+Y">Yong-Xia Jia</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei-Xi Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08997v1-abstract-short" style="display: inline;"> Samaras, a distinct category of fruit, are composed of heavier seeds and lighter wings. Diversity in morphologies and structures subtly contributes to the flight patterns of various seeds, thereby serving as a key factor in the reproductive strategies of plants. To explore the mechanisms underlying various samara flight behaviors, we proposed an effective scheme by manipulating the mass distributi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08997v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08997v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08997v1-abstract-full" style="display: none;"> Samaras, a distinct category of fruit, are composed of heavier seeds and lighter wings. Diversity in morphologies and structures subtly contributes to the flight patterns of various seeds, thereby serving as a key factor in the reproductive strategies of plants. To explore the mechanisms underlying various samara flight behaviors, we proposed an effective scheme by manipulating the mass distribution on a plate to mimic various three-dimensional descent behaviors of samaras. Through this framework, we experimentally identified and characterized four distinct flight modes. The three-dimensional vortical structures were then numerically analyzed to gain insights into the samara-inspired flight behaviors. 
Our study demonstrates how strategic mass distribution in samaras leads to diverse flight behaviors that leverage vortices to enhance seed dispersal, offering a fresh perspective for the design of biomimetic fliers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08997v1-abstract-full').style.display = 'none'; document.getElementById('2411.08997v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08577">arXiv:2411.08577</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08577">pdf</a>, <a href="https://arxiv.org/format/2411.08577">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cosmology and Nongalactic Astrophysics">astro-ph.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> </div> </div> <p class="title is-5 mathjax"> Constraining Axion-Like Particles from observations of AGN B2 2234+28A and 3C 454.3 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yu-Chong Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+S">Siyu Chen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei-Cong Huang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Q">Qing Yang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+H">Hong-Hao Zhang</a> </p> <p class="abstract 
mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08577v1-abstract-short" style="display: inline;"> Axion-photon oscillation effect provides a possible explanation for the presence of very-high-energy (VHE) $纬$-ray signals from distant sources. In this work, we propose a model-dependent method to select possible sources that may give sufficient constraints on the axion parameters. We investigate such effect in the spectra of active galactic nuclei (AGN) B2 2234+28A and 3C 454.3 based on data obt&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08577v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08577v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08577v1-abstract-full" style="display: none;"> Axion-photon oscillation effect provides a possible explanation for the presence of very-high-energy (VHE) $纬$-ray signals from distant sources. In this work, we propose a model-dependent method to select possible sources that may give sufficient constraints on the axion parameters. We investigate such effect in the spectra of active galactic nuclei (AGN) B2 2234+28A and 3C 454.3 based on data obtained from Fermi Large Area Telescope (Fermi-LAT) and MAGIC U.L. We utilize the Markov Chain Monte Carlo method to fit the axion parameters, yielding a result of $g_{a纬}=3.05^{+0.51}_{-0.31} \times 10^{-11}$ GeV$^{-1}$ for the axion-photon coupling strength and $m_{a}=5.25^{+2.35}_{-2.65} \times 10^{-8} $ eV for the axion mass. We also perform 95\% confidence level (CL) constraints to set an upper limit for $g_{a纬}$. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08577v1-abstract-full').style.display = 'none'; document.getElementById('2411.08577v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 10 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08437">arXiv:2411.08437</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08437">pdf</a>, <a href="https://arxiv.org/format/2411.08437">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Evolutionary Algorithm with Detection Region Method for Constrained Multi-Objective Problems with Binary Constraints </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weixiong Huang</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+R">Rui Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+T">Tao Zhang</a>, <a href="/search/?searchtype=author&amp;query=Qi%2C+S">Sheng Qi</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+L">Ling Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08437v1-abstract-short" style="display: inline;"> Solving constrained multi-objective optimization problems (CMOPs) is 
a challenging task. While many practical algorithms have been developed to tackle CMOPs, real-world scenarios often present cases where the constraint functions are unknown or unquantifiable, resulting in only binary outcomes (feasible or infeasible). This limitation reduces the effectiveness of constraint violation guidance, whi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08437v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08437v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08437v1-abstract-full" style="display: none;"> Solving constrained multi-objective optimization problems (CMOPs) is a challenging task. While many practical algorithms have been developed to tackle CMOPs, real-world scenarios often present cases where the constraint functions are unknown or unquantifiable, resulting in only binary outcomes (feasible or infeasible). This limitation reduces the effectiveness of constraint violation guidance, which can negatively impact the performance of existing algorithms that rely on this approach. Such challenges are particularly detrimental for algorithms employing the epsilon-based method, as they hinder effective relaxation of the feasible region. To address these challenges, this paper proposes a novel algorithm called DRMCMO based on the detection region method. In DRMCMO, detection regions dynamic monitor feasible solutions to enhance convergence, helping the population escape local optima. Additionally, these regions collaborate with the neighbor pairing strategy to improve population diversity within narrow feasible areas. We have modified three existing test suites to serve as benchmark test problems for CMOPs with binary constraints(CMOP/BC) and conducted comprehensive comparative experiments with state-of-the-art algorithms on these test suites and real-world problems. 
The results demonstrate the strong competitiveness of DRMCMO against state-of-the-art algorithms. Given the limited research on CMOP/BC, our study offers a new perspective for advancing this field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08437v1-abstract-full').style.display = 'none'; document.getElementById('2411.08437v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07294">arXiv:2411.07294</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07294">pdf</a>, <a href="https://arxiv.org/format/2411.07294">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> </div> </div> <p class="title is-5 mathjax"> Right-Handed Neutrino Masses from the Electroweak Scale </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Batell%2C+B">Brian Batell</a>, <a href="/search/?searchtype=author&amp;query=Bhoonah%2C+A">Amit Bhoonah</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenjie Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07294v1-abstract-short" style="display: inline;"> Heavy right-handed neutrinos are highly motivated due to their connection with the origin of neutrino masses via the seesaw mechanism. 
If the right-handed neutrino Majorana mass is at or below the weak scale, direct experimental discovery of these states is possible in laboratory experiments. However, there is no a priori basis to expect right-handed neutrinos to be so light since the Majorana mas&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07294v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07294v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07294v1-abstract-full" style="display: none;"> Heavy right-handed neutrinos are highly motivated due to their connection with the origin of neutrino masses via the seesaw mechanism. If the right-handed neutrino Majorana mass is at or below the weak scale, direct experimental discovery of these states is possible in laboratory experiments. However, there is no a priori basis to expect right-handed neutrinos to be so light since the Majorana mass is a technically natural parameter and could comfortably reside at any scale, including at scales far above the weak scale. Here we explore the possibility that the right-handed neutrino Majorana mass originates from electroweak symmetry breaking. Working within an effective theory with two Higgs doublets, nonzero lepton number is assigned to the bilinear operator built from the two Higgs fields, which is then coupled to the right-handed neutrino mass operator. In tandem with the neutrino Yukawa coupling, following electroweak symmetry breaking a seesaw mechanism operates, generating the light SM neutrino masses along with right-handed neutrinos with masses below the electroweak scale. This scenario leads to novel phenomenology in the Higgs sector, which may be probed at the LHC and at future colliders. There are also interesting prospects for neutrinoless double beta decay and lepton flavor violation. 
We also explore some theoretical aspects of the scenario, including the technical naturalness of the effective field theory and ultraviolet completions of the right-handed neutrino Majorana mass. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07294v1-abstract-full').style.display = 'none'; document.getElementById('2411.07294v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 7 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> PITT-PACC-2411 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07111">arXiv:2411.07111</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07111">pdf</a>, <a href="https://arxiv.org/format/2411.07111">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Building a Taiwanese Mandarin Spoken Language Model: A First Attempt </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Yang%2C+C">Chih-Kai Yang</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Y">Yu-Kuan Fu</a>, <a 
href="/search/?searchtype=author&amp;query=Li%2C+C">Chen-An Li</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+Y">Yi-Cheng Lin</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+Y">Yu-Xiang Lin</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+W">Wei-Chih Chen</a>, <a href="/search/?searchtype=author&amp;query=Chung%2C+H+L">Ho Lam Chung</a>, <a href="/search/?searchtype=author&amp;query=Kuan%2C+C">Chun-Yi Kuan</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei-Ping Huang</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+K">Ke-Han Lu</a>, <a href="/search/?searchtype=author&amp;query=Lin%2C+T">Tzu-Quan Lin</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+H">Hsiu-Hsuan Wang</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+E">En-Pei Hu</a>, <a href="/search/?searchtype=author&amp;query=Hsu%2C+C">Chan-Jan Hsu</a>, <a href="/search/?searchtype=author&amp;query=Tseng%2C+L">Liang-Hsuan Tseng</a>, <a href="/search/?searchtype=author&amp;query=Chiu%2C+I">I-Hsiang Chiu</a>, <a href="/search/?searchtype=author&amp;query=Sanga%2C+U">Ulin Sanga</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xuanjun Chen</a>, <a href="/search/?searchtype=author&amp;query=Hsu%2C+P">Po-chun Hsu</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+S">Shu-wen Yang</a>, <a href="/search/?searchtype=author&amp;query=Lee%2C+H">Hung-yi Lee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07111v1-abstract-short" style="display: inline;"> This technical report presents our initial attempt to build a spoken large language model (LLM) for Taiwanese Mandarin, specifically tailored to enable real-time, speech-to-speech interaction in multi-turn conversations. 
Our end-to-end model incorporates a decoder-only transformer architecture and aims to achieve seamless interaction while preserving the conversational flow, including full-duplex&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07111v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07111v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07111v1-abstract-full" style="display: none;"> This technical report presents our initial attempt to build a spoken large language model (LLM) for Taiwanese Mandarin, specifically tailored to enable real-time, speech-to-speech interaction in multi-turn conversations. Our end-to-end model incorporates a decoder-only transformer architecture and aims to achieve seamless interaction while preserving the conversational flow, including full-duplex capabilities allowing simultaneous speaking and listening. The paper also details the training process, including data preparation with synthesized dialogues and adjustments for real-time interaction. We also developed a platform to evaluate conversational fluency and response coherence in multi-turn dialogues. We hope the release of the report can contribute to the future development of spoken LLMs in Taiwanese Mandarin. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07111v1-abstract-full').style.display = 'none'; document.getElementById('2411.07111v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Work in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06976">arXiv:2411.06976</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06976">pdf</a>, <a href="https://arxiv.org/format/2411.06976">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> A Hierarchical Compression Technique for 3D Gaussian Splatting Compression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+H">He Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenjie Huang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Q">Qi Yang</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+Y">Yiling Xu</a>, <a href="/search/?searchtype=author&amp;query=li%2C+Z">Zhu li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06976v1-abstract-short" style="display: inline;"> 3D Gaussian Splatting (GS) demonstrates excellent rendering quality and generation speed in novel view synthesis. However, substantial data size poses challenges for storage and transmission, making 3D GS compression an essential technology. 
Current 3D GS compression research primarily focuses on developing more compact scene representations, such as converting explicit 3D GS data into implicit fo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06976v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06976v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06976v1-abstract-full" style="display: none;"> 3D Gaussian Splatting (GS) demonstrates excellent rendering quality and generation speed in novel view synthesis. However, substantial data size poses challenges for storage and transmission, making 3D GS compression an essential technology. Current 3D GS compression research primarily focuses on developing more compact scene representations, such as converting explicit 3D GS data into implicit forms. In contrast, compression of the GS data itself has hardly been explored. To address this gap, we propose a Hierarchical GS Compression (HGSC) technique. Initially, we prune unimportant Gaussians based on importance scores derived from both global and local significance, effectively reducing redundancy while maintaining visual quality. An Octree structure is used to compress 3D positions. Based on the 3D GS Octree, we implement a hierarchical attribute compression strategy by employing a KD-tree to partition the 3D GS into multiple blocks. We apply farthest point sampling to select anchor primitives within each block and others as non-anchor primitives with varying Levels of Details (LoDs). Anchor primitives serve as reference points for predicting non-anchor primitives across different LoDs to reduce spatial redundancy. For anchor primitives, we use the region adaptive hierarchical transform to achieve near-lossless compression of various attributes. For non-anchor primitives, each is predicted based on the k-nearest anchor primitives. 
To further minimize prediction errors, the reconstructed LoD and anchor primitives are combined to form new anchor primitives to predict the next LoD. Our method notably achieves superior compression quality and a significant data size reduction of over 4.5 times compared to the state-of-the-art compression method on small scenes datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06976v1-abstract-full').style.display = 'none'; document.getElementById('2411.06976v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06971">arXiv:2411.06971</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06971">pdf</a>, <a href="https://arxiv.org/format/2411.06971">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MapSAM: Adapting Segment Anything Model for Automated Feature Detection in Historical Maps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Xia%2C+X">Xue Xia</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+D">Daiwei Zhang</a>, <a href="/search/?searchtype=author&amp;query=Song%2C+W">Wenxuan Song</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei Huang</a>, <a href="/search/?searchtype=author&amp;query=Hurni%2C+L">Lorenz Hurni</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="2411.06971v1-abstract-short" style="display: inline;"> Automated feature detection in historical maps can significantly accelerate the reconstruction of the geospatial past. However, this process is often constrained by the time-consuming task of manually digitizing sufficient high-quality training data. The emergence of visual foundation models, such as the Segment Anything Model (SAM), offers a promising solution due to their remarkable generalizati&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06971v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06971v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06971v1-abstract-full" style="display: none;"> Automated feature detection in historical maps can significantly accelerate the reconstruction of the geospatial past. However, this process is often constrained by the time-consuming task of manually digitizing sufficient high-quality training data. The emergence of visual foundation models, such as the Segment Anything Model (SAM), offers a promising solution due to their remarkable generalization capabilities and rapid adaptation to new data distributions. Despite this, directly applying SAM in a zero-shot manner to historical map segmentation poses significant challenges, including poor recognition of certain geospatial features and a reliance on input prompts, which limits its ability to be fully automated. To address these challenges, we introduce MapSAM, a parameter-efficient fine-tuning strategy that adapts SAM into a prompt-free and versatile solution for various downstream historical map segmentation tasks. Specifically, we employ Weight-Decomposed Low-Rank Adaptation (DoRA) to integrate domain-specific knowledge into the image encoder. 
Additionally, we develop an automatic prompt generation process, eliminating the need for manual input. We further enhance the positional prompt in SAM, transforming it into a higher-level positional-semantic prompt, and modify the cross-attention mechanism in the mask decoder with masked attention for more effective feature aggregation. The proposed MapSAM framework demonstrates promising performance across two distinct historical map segmentation tasks: one focused on linear features and the other on areal features. Experimental results show that it adapts well to various features, even when fine-tuned with extremely limited data (e.g. 10 shots). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06971v1-abstract-full').style.display = 'none'; document.getElementById('2411.06971v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06686">arXiv:2411.06686</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06686">pdf</a>, <a href="https://arxiv.org/format/2411.06686">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SeedEdit: Align Image Re-Generation to Image Editing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Shi%2C+Y">Yichun Shi</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+P">Peng Wang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weilin Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06686v1-abstract-short" style="display: inline;"> We introduce SeedEdit, a diffusion model that is able to revise a given image with any text prompt. In our perspective, the key to such a task is to obtain an optimal balance between maintaining the original image, i.e. image reconstruction, and generating a new image, i.e. image re-generation. To this end, we start from a weak generator (text-to-image model) that creates diverse pairs between suc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06686v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06686v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06686v1-abstract-full" style="display: none;"> We introduce SeedEdit, a diffusion model that is able to revise a given image with any text prompt. 
In our perspective, the key to such a task is to obtain an optimal balance between maintaining the original image, i.e. image reconstruction, and generating a new image, i.e. image re-generation. To this end, we start from a weak generator (text-to-image model) that creates diverse pairs between such two directions and gradually align it into a strong image editor that well balances between the two tasks. SeedEdit can achieve more diverse and stable editing capability over prior image editing methods, enabling sequential revision over images generated by diffusion models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06686v1-abstract-full').style.display = 'none'; document.getElementById('2411.06686v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Our website: https://team.doubao.com/seededit</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.05669">arXiv:2411.05669</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.05669">pdf</a>, <a href="https://arxiv.org/format/2411.05669">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Nuclear Experiment">nucl-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of the $蠄(2S)$ to $J/蠄$ cross-section ratio as a function of centrality in PbPb collisions at $\sqrt{s_{\text{NN}}}$ = 5.02 TeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. 
Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1128 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.05669v1-abstract-short" style="display: inline;"> The dissociation of quarkonium states with different binding energies produced in heavy-ion collisions is a powerful probe for investigating the formation and properties of the quark-gluon plasma. 
The ratio of production cross-sections of $ψ(2S)$ and $J/ψ$ mesons times the ratio of their branching fractions into the dimuon final state is measured as a function of centrality using data collected by&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05669v1-abstract-full').style.display = 'inline'; document.getElementById('2411.05669v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.05669v1-abstract-full" style="display: none;"> The dissociation of quarkonium states with different binding energies produced in heavy-ion collisions is a powerful probe for investigating the formation and properties of the quark-gluon plasma. The ratio of production cross-sections of $ψ(2S)$ and $J/ψ$ mesons times the ratio of their branching fractions into the dimuon final state is measured as a function of centrality using data collected by the LHCb detector in PbPb collisions at $\sqrt{s_{\text{NN}}}$ = 5.02 TeV. The measured ratio shows no dependence on the collision centrality, and is compared to the latest theory predictions and to the recent measurements in literature. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.05669v1-abstract-full').style.display = 'none'; document.getElementById('2411.05669v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with any supplementary material and additional information, are available at https://cern.ch/lhcbproject/Publications/p/LHCb-PAPER-2024-041.html (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> CERN-EP-2024-272, LHCb-PAPER-2024-041 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.04997">arXiv:2411.04997</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.04997">pdf</a>, <a href="https://arxiv.org/format/2411.04997">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> LLM2CLIP: Powerful Language Model Unlocks Richer Visual Representation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weiquan Huang</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+A">Aoqi Wu</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Y">Yifan Yang</a>, <a href="/search/?searchtype=author&amp;query=Luo%2C+X">Xufang Luo</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Y">Yuqing Yang</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+L">Liang Hu</a>, <a href="/search/?searchtype=author&amp;query=Dai%2C+Q">Qi Dai</a>, <a href="/search/?searchtype=author&amp;query=Dai%2C+X">Xiyang Dai</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+D">Dongdong Chen</a>, <a href="/search/?searchtype=author&amp;query=Luo%2C+C">Chong Luo</a>, <a 
href="/search/?searchtype=author&amp;query=Qiu%2C+L">Lili Qiu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.04997v3-abstract-short" style="display: inline;"> CLIP is a foundational multimodal model that aligns image and text features into a shared space using contrastive learning on large-scale image-text pairs. Its strength lies in leveraging natural language as a rich supervisory signal. With the rapid progress of large language models (LLMs), we explore their potential to further enhance CLIP&#39;s multimodal representation learning. This work introduce&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04997v3-abstract-full').style.display = 'inline'; document.getElementById('2411.04997v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.04997v3-abstract-full" style="display: none;"> CLIP is a foundational multimodal model that aligns image and text features into a shared space using contrastive learning on large-scale image-text pairs. Its strength lies in leveraging natural language as a rich supervisory signal. With the rapid progress of large language models (LLMs), we explore their potential to further enhance CLIP&#39;s multimodal representation learning. This work introduces a fine-tuning approach that integrates LLMs with the pretrained CLIP visual encoder, leveraging LLMs&#39; advanced text understanding and open-world knowledge to improve CLIP&#39;s ability to process long and complex captions. To address the challenge of LLMs&#39; autoregressive nature, we propose a caption-to-caption contrastive learning framework to enhance the discriminative power of their outputs. 
Our method achieves substantial performance gains on various downstream tasks, demonstrating the effectiveness of combining LLMs with CLIP for enhanced multimodal learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.04997v3-abstract-full').style.display = 'none'; document.getElementById('2411.04997v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03715">arXiv:2411.03715</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03715">pdf</a>, <a href="https://arxiv.org/format/2411.03715">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> MOS-Bench: Benchmarking Generalization Abilities of Subjective Speech Quality Assessment Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wen-Chin Huang</a>, <a href="/search/?searchtype=author&amp;query=Cooper%2C+E">Erica Cooper</a>, <a href="/search/?searchtype=author&amp;query=Toda%2C+T">Tomoki Toda</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03715v1-abstract-short" style="display: inline;"> Subjective speech quality assessment (SSQA) is critical 
for evaluating speech samples as perceived by human listeners. While model-based SSQA has enjoyed great success thanks to the development of deep neural networks (DNNs), generalization remains a key challenge, especially for unseen, out-of-domain data. To benchmark the generalization abilities of SSQA models, we present MOS-Bench, a diverse c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03715v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03715v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03715v1-abstract-full" style="display: none;"> Subjective speech quality assessment (SSQA) is critical for evaluating speech samples as perceived by human listeners. While model-based SSQA has enjoyed great success thanks to the development of deep neural networks (DNNs), generalization remains a key challenge, especially for unseen, out-of-domain data. To benchmark the generalization abilities of SSQA models, we present MOS-Bench, a diverse collection of datasets. In addition, we also introduce SHEET, an open-source toolkit containing complete recipes to conduct SSQA experiments. We provided benchmark results for MOS-Bench, and we also explored multi-dataset training to enhance generalization. Additionally, we proposed a new performance metric, best score difference/ratio, and used latent space visualizations to explain model behavior, offering valuable insights for future research. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03715v1-abstract-full').style.display = 'none'; document.getElementById('2411.03715v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to Transactions on Audio, Speech and Language Processing. This work has been submitted to the IEEE for possible publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03512">arXiv:2411.03512</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03512">pdf</a>, <a href="https://arxiv.org/ps/2411.03512">ps</a>, <a href="https://arxiv.org/format/2411.03512">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> Ergodicity and Mixing of Sublinear Expectation System and Applications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wen Huang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+C">Chunlin Liu</a>, <a href="/search/?searchtype=author&amp;query=Peng%2C+S">Shige Peng</a>, <a href="/search/?searchtype=author&amp;query=Qu%2C+B">Baoyou Qu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03512v1-abstract-short" style="display: inline;"> We utilize an ergodic theory framework to explore 
sublinear expectation theory. Specifically, we investigate the pointwise Birkhoff&#39;s ergodic theorem for invariant sublinear expectation systems. By further assuming that these sublinear expectation systems are ergodic, we derive stronger results. Furthermore, we relax the conditions for the law of large numbers and the strong law of large numbers u&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03512v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03512v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03512v1-abstract-full" style="display: none;"> We utilize an ergodic theory framework to explore sublinear expectation theory. Specifically, we investigate the pointwise Birkhoff&#39;s ergodic theorem for invariant sublinear expectation systems. By further assuming that these sublinear expectation systems are ergodic, we derive stronger results. Furthermore, we relax the conditions for the law of large numbers and the strong law of large numbers under sublinear expectations from independent and identical distribution to $α$-mixing. These results can be applied to a class of stochastic differential equations driven by $G$-Brownian motion (i.e., $G$-SDEs), such as $G$-Ornstein-Uhlenbeck processes. As byproducts, we also obtain a series of applications for classical ergodic theory and capacity theory. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03512v1-abstract-full').style.display = 'none'; document.getElementById('2411.03512v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> Primary 37A25; 60G65; secondary 28A12; 60F17 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03399">arXiv:2411.03399</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03399">pdf</a>, <a href="https://arxiv.org/format/2411.03399">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of $D_{s1}(2460)^{+}\to D_{s}^{+}π^{+}π^{-}$ in $B\to {\bar{D}}^{(*)}D_{s}^{+}π^{+}π^{-}$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. 
Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1124 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03399v1-abstract-short" style="display: inline;"> An amplitude analysis of the $D_{s1}(2460)^+\to D_{s}^{+}π^{+}π^{-}$ transition is performed simultaneously in $B^{0}\to D^{-}D_{s}^{+}π^{+}π^{-}$, $B^{+}\to{\bar{D}}^{0} D_{s}^{+}π^{+}π^{-}$, and $B^{0}\to D^{*-}D_{s}^{+}π^{+}π^{-}$ decays. 
The study is based on a data sample of proton-proton collisions recorded with the LHCb detector at centre-of-mass energies of $\sqrt{s}=7,8,$ and $13\,$TeV, c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03399v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03399v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03399v1-abstract-full" style="display: none;"> An amplitude analysis of the $D_{s1}(2460)^+\to D_{s}^{+}π^{+}π^{-}$ transition is performed simultaneously in $B^{0}\to D^{-}D_{s}^{+}π^{+}π^{-}$, $B^{+}\to{\bar{D}}^{0} D_{s}^{+}π^{+}π^{-}$, and $B^{0}\to D^{*-}D_{s}^{+}π^{+}π^{-}$ decays. The study is based on a data sample of proton-proton collisions recorded with the LHCb detector at centre-of-mass energies of $\sqrt{s}=7,8,$ and $13\,$TeV, corresponding to a total integrated luminosity of $9\,\rm{fb}^{-1}$. A clear double-peak structure is observed in the $m(π^{+}π^{-})$ spectrum of the $D_{s1}(2460)^{+}\to D_{s}^{+}π^{+}π^{-}$ decay. The data can be described either with a model including $f_0(500)$, $f_0(980)$ and $f_2(1270)$ resonances, in which the contributions of $f_0(980)$ and $f_2(1270)$ are unexpectedly large, or with a model including $f_0(500)$, a doubly charged open-charm tetraquark state $T_{c\bar{s}}^{++}$ and its isospin partner $T_{c\bar{s}}^{0}$. If the former is considered implausible, the $T_{c\bar{s}}$ states are observed with high significance, and the data are consistent with isospin symmetry. When imposing isospin constraints between the two $T_{c\bar{s}}$ states, their mass and width are determined to be $2327\pm13\pm13\,$MeV and $96\pm16\,^{+170}_{-23}\,$MeV, respectively, where the first uncertainty is statistical and the second is systematic. The mass is slightly below the $DK$ threshold, and a spin-parity of $0^+$ is favoured with high significance. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03399v1-abstract-full').style.display = 'none'; document.getElementById('2411.03399v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3280/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-033, CERN-EP-2024-264 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03114">arXiv:2411.03114</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03114">pdf</a>, <a href="https://arxiv.org/format/2411.03114">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optics">physics.optics</span> </div> </div> <p class="title is-5 mathjax"> Investigating the Applicability of a Snapshot Computed Tomography Imaging Spectrometer for the Prediction of Brix and pH of Grapes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Peters%2C+M+S">Mads Svanborg 
Peters</a>, <a href="/search/?searchtype=author&amp;query=Ahleb%C3%A6k%2C+M+J">Mads Juul Ahlebæk</a>, <a href="/search/?searchtype=author&amp;query=Frandsen%2C+M+T">Mads Toudal Frandsen</a>, <a href="/search/?searchtype=author&amp;query=J%C3%B8rgensen%2C+B">Bjarke Jørgensen</a>, <a href="/search/?searchtype=author&amp;query=Jessen%2C+C+H">Christian Hald Jessen</a>, <a href="/search/?searchtype=author&amp;query=Carlsen%2C+A+K">Andreas Krogh Carlsen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei-Chih Huang</a>, <a href="/search/?searchtype=author&amp;query=Eriksen%2C+R+L">René Lynge Eriksen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03114v1-abstract-short" style="display: inline;"> In this paper, a recently developed snapshot hyperspectral imaging (HSI) system based on Computed Tomography Imaging Spectroscopy (CTIS) is utilized to determine Brix and pH values in Sheegene 20 table grapes through Partial Least Squares Regression (PLSR) modeling. The performance of the CTIS system is compared with that of a state-of-the-art line scan HSI system by imaging 100 grapes across both&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03114v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03114v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03114v1-abstract-full" style="display: none;"> In this paper, a recently developed snapshot hyperspectral imaging (HSI) system based on Computed Tomography Imaging Spectroscopy (CTIS) is utilized to determine Brix and pH values in Sheegene 20 table grapes through Partial Least Squares Regression (PLSR) modeling. 
The performance of the CTIS system is compared with that of a state-of-the-art line scan HSI system by imaging 100 grapes across both platforms. Reference measurements of Brix and pH values are obtained directly using a refractometer and a pH meter, as these parameters are essential for assessing the quality of table and wine grapes. The findings indicate that the spectra captured by the CTIS camera correlate well with the reference measurements, despite the system&#39;s narrower spectral range. The CTIS camera&#39;s advantages, including its lower cost, portability, and reduced susceptibility to motion errors, highlight its potential for promising in-field applications in grape quality assessment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03114v1-abstract-full').style.display = 'none'; document.getElementById('2411.03114v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02837">arXiv:2411.02837</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02837">pdf</a>, <a href="https://arxiv.org/format/2411.02837">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> On the Comparison between Multi-modal and Single-modal Contrastive Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei Huang</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+A">Andi Han</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yongqiang Chen</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+Y">Yuan Cao</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+Z">Zhiqiang Xu</a>, <a href="/search/?searchtype=author&amp;query=Suzuki%2C+T">Taiji Suzuki</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02837v1-abstract-short" style="display: inline;"> Multi-modal contrastive learning with language supervision has presented a paradigm shift in modern machine learning. By pre-training on a web-scale dataset, multi-modal contrastive learning can learn high-quality representations that exhibit impressive robustness and transferability. 
Despite its empirical success, the theoretical understanding is still in its infancy, especially regarding its com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02837v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02837v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02837v1-abstract-full" style="display: none;"> Multi-modal contrastive learning with language supervision has presented a paradigm shift in modern machine learning. By pre-training on a web-scale dataset, multi-modal contrastive learning can learn high-quality representations that exhibit impressive robustness and transferability. Despite its empirical success, the theoretical understanding is still in its infancy, especially regarding its comparison with single-modal contrastive learning. In this work, we introduce a feature learning theory framework that provides a theoretical foundation for understanding the differences between multi-modal and single-modal contrastive learning. Based on a data generation model consisting of signal and noise, our analysis is performed on a ReLU network trained with the InfoMax objective function. Through a trajectory-based optimization analysis and generalization characterization on downstream tasks, we identify the critical factor, which is the signal-to-noise ratio (SNR), that impacts the generalizability in downstream tasks of both multi-modal and single-modal contrastive learning. Through the cooperation between the two modalities, multi-modal learning can achieve better feature learning, leading to improvements in performance in downstream tasks compared to single-modal learning. Our analysis provides a unified framework that can characterize the optimization and generalization of both single-modal and multi-modal contrastive learning. 
Empirical experiments on both synthetic and real-world datasets further consolidate our theoretical findings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02837v1-abstract-full').style.display = 'none'; document.getElementById('2411.02837v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">51pages, 1 figure, 1 table</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> NeurIPS 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02737">arXiv:2411.02737</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02737">pdf</a>, <a href="https://arxiv.org/ps/2411.02737">ps</a>, <a href="https://arxiv.org/format/2411.02737">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Analysis of PDEs">math.AP</span> </div> </div> <p class="title is-5 mathjax"> Modified Wave operators for the Hartree equation with repulsive Coulomb potential </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenrui Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02737v1-abstract-short" style="display: inline;"> We study the final state problem for the Hartree equation with repulsive Coulomb potential: \[i\partial_t 
u+\frac{1}{2}Δu-\frac{1}{|x|}u=((-Δ)^{-1}|u|)^2u\] We show the work in \cite{KaMi} can be extended to the Hartree nonlinearity: Given a prescribed asymptotic profile, we construct a unique global solution scattering to the profile. In particular, the existence of the modified wave operators is&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02737v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02737v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02737v1-abstract-full" style="display: none;"> We study the final state problem for the Hartree equation with repulsive Coulomb potential: \[i\partial_t u+\frac{1}{2}Δu-\frac{1}{|x|}u=((-Δ)^{-1}|u|)^2u\] We show the work in \cite{KaMi} can be extended to the Hartree nonlinearity: Given a prescribed asymptotic profile, we construct a unique global solution scattering to the profile. In particular, the existence of the modified wave operators is obtained for sufficiently localized small scattering data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02737v1-abstract-full').style.display = 'none'; document.getElementById('2411.02737v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02199">arXiv:2411.02199</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02199">pdf</a>, <a href="https://arxiv.org/format/2411.02199">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Provably Transformers Harness Multi-Concept Word Semantics for Efficient In-Context Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Bu%2C+D">Dake Bu</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei Huang</a>, <a href="/search/?searchtype=author&amp;query=Han%2C+A">Andi Han</a>, <a href="/search/?searchtype=author&amp;query=Nitanda%2C+A">Atsushi Nitanda</a>, <a href="/search/?searchtype=author&amp;query=Suzuki%2C+T">Taiji Suzuki</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Q">Qingfu Zhang</a>, <a href="/search/?searchtype=author&amp;query=Wong%2C+H">Hau-San Wong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02199v4-abstract-short" style="display: inline;"> Transformer-based large language models (LLMs) have displayed remarkable creative prowess and emergence capabilities. 
Existing empirical studies have revealed a strong connection between these LLMs&#39; impressive emergence abilities and their in-context learning (ICL) capacity, allowing them to solve new tasks using only task-specific prompts without further fine-tuning. On the other hand, existing e&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02199v4-abstract-full').style.display = 'inline'; document.getElementById('2411.02199v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02199v4-abstract-full" style="display: none;"> Transformer-based large language models (LLMs) have displayed remarkable creative prowess and emergence capabilities. Existing empirical studies have revealed a strong connection between these LLMs&#39; impressive emergence abilities and their in-context learning (ICL) capacity, allowing them to solve new tasks using only task-specific prompts without further fine-tuning. On the other hand, existing empirical and theoretical studies also show that there is a linear regularity of the multi-concept encoded semantic representation behind transformer-based LLMs. However, existing theoretical work fail to build up an understanding of the connection between this regularity and the innovative power of ICL. Additionally, prior work often focuses on simplified, unrealistic scenarios involving linear transformers or unrealistic loss functions, and they achieve only linear or sub-linear convergence rates. In contrast, this work provides a fine-grained mathematical analysis to show how transformers leverage the multi-concept semantics of words to enable powerful ICL and excellent out-of-distribution ICL abilities, offering insights into how transformers innovate solutions for certain unseen tasks encoded with multiple cross-concept semantics. 
Inspired by empirical studies on the linear latent geometry of LLMs, the analysis is based on a concept-based low-noise sparse coding prompt model. Leveraging advanced techniques, this work showcases the exponential 0-1 loss convergence over the highly non-convex training dynamics, which pioneeringly incorporates the challenges of softmax self-attention, ReLU-activated MLPs, and cross-entropy loss. Empirical simulations corroborate the theoretical findings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02199v4-abstract-full').style.display = 'none'; document.getElementById('2411.02199v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by the 38th Conference on Neural Information Processing Systems (NeurIPS 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02175">arXiv:2411.02175</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02175">pdf</a>, <a href="https://arxiv.org/format/2411.02175">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SAFE: Slow and Fast Parameter-Efficient Tuning for Continual Learning with Pre-Trained Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhao%2C+L">Linglan Zhao</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+X">Xuerui Zhang</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+K">Ke Yan</a>, <a href="/search/?searchtype=author&amp;query=Ding%2C+S">Shouhong Ding</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weiran Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02175v1-abstract-short" style="display: inline;"> Continual learning aims to incrementally acquire new concepts in data streams while resisting forgetting previous knowledge. With the rise of powerful pre-trained models (PTMs), there is a growing interest in training incremental learning systems using these foundation models, rather than learning from scratch. 
Existing works often view PTMs as a strong initial point and directly apply parameter-e&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02175v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02175v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02175v1-abstract-full" style="display: none;"> Continual learning aims to incrementally acquire new concepts in data streams while resisting forgetting previous knowledge. With the rise of powerful pre-trained models (PTMs), there is a growing interest in training incremental learning systems using these foundation models, rather than learning from scratch. Existing works often view PTMs as a strong initial point and directly apply parameter-efficient tuning (PET) in the first session for adapting to downstream tasks. In the following sessions, most methods freeze model parameters for tackling forgetting issues. However, applying PET directly to downstream data cannot fully explore the inherent knowledge in PTMs. Additionally, freezing the parameters in incremental sessions hinders models&#39; plasticity to novel concepts not covered in the first session. To solve the above issues, we propose a Slow And Fast parameter-Efficient tuning (SAFE) framework. In particular, to inherit general knowledge from foundation models, we include a transfer loss function by measuring the correlation between the PTM and the PET-applied model. After calibrating in the first session, the slow efficient tuning parameters can capture more informative features, improving generalization to incoming classes. Moreover, to further incorporate novel concepts, we strike a balance between stability and plasticity by fixing slow efficient tuning parameters and continuously updating the fast ones. 
Specifically, a cross-classification loss with feature alignment is proposed to circumvent catastrophic forgetting. During inference, we introduce an entropy-based aggregation strategy to dynamically utilize the complementarity in the slow and fast learners. Extensive experiments on seven benchmark datasets verify the effectiveness of our method by significantly surpassing the state-of-the-art. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02175v1-abstract-full').style.display = 'none'; document.getElementById('2411.02175v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by NeurIPS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02030">arXiv:2411.02030</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02030">pdf</a>, <a href="https://arxiv.org/ps/2411.02030">ps</a>, <a href="https://arxiv.org/format/2411.02030">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> </div> </div> <p class="title is-5 mathjax"> Finite ergodic components for upper probabilities </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Feng%2C+C">Chunrong Feng</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wen Huang</a>, <a 
href="/search/?searchtype=author&amp;query=Liu%2C+C">Chunlin Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+H">Huaizhong Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02030v1-abstract-short" style="display: inline;"> Under the notion of ergodicity of upper probability in the sense of Feng and Zhao (2021) that any invariant set either has capacity $0$ or its complement has capacity 0, we introduce the definition of finite ergodic components (FEC). We prove an invariant upper probability has FEC if and only if it is in the regime that any invariant set has either capacity $0$ or capacity $1$, proposed by Cerreia&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02030v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02030v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02030v1-abstract-full" style="display: none;"> Under the notion of ergodicity of upper probability in the sense of Feng and Zhao (2021) that any invariant set either has capacity $0$ or its complement has capacity 0, we introduce the definition of finite ergodic components (FEC). We prove an invariant upper probability has FEC if and only if it is in the regime that any invariant set has either capacity $0$ or capacity $1$, proposed by Cerreia-Vioglio, Maccheroni, and Marinacci (2016). Furthermore, this is also equivalent to that the eigenvalue $1$ of the Koopman operator is of finite multiplicity, while in the ergodic upper probability regime, as in the classical ergodic probability case, the eigenvalue $1$ of the Koopman operator is simple. Additionally, we obtain the equivalence of the law of large numbers with multiple values, the asymptotic independence and the FEC. 
Furthermore, we apply these to obtain the corresponding results for non-invariant probabilities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02030v1-abstract-full').style.display = 'none'; document.getElementById('2411.02030v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.01215">arXiv:2411.01215</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.01215">pdf</a>, <a href="https://arxiv.org/format/2411.01215">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> </div> </div> <p class="title is-5 mathjax"> Detection of two TeV gamma-ray outbursts from NGC 1275 by LHAASO </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Cao%2C+Z">Zhen Cao</a>, <a href="/search/?searchtype=author&amp;query=Aharonian%2C+F">F. Aharonian</a>, <a href="/search/?searchtype=author&amp;query=Axikegu"> Axikegu</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y+X">Y. X. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+Y+W">Y. W. Bao</a>, <a href="/search/?searchtype=author&amp;query=Bastieri%2C+D">D. Bastieri</a>, <a href="/search/?searchtype=author&amp;query=Bi%2C+X+J">X. J. Bi</a>, <a href="/search/?searchtype=author&amp;query=Bi%2C+Y+J">Y. J. Bi</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+J+T">J. T. Cai</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+Q">Q. Cao</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+W+Y">W. Y. 
Cao</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+Z">Zhe Cao</a>, <a href="/search/?searchtype=author&amp;query=Chang%2C+J">J. Chang</a>, <a href="/search/?searchtype=author&amp;query=Chang%2C+J+F">J. F. Chang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+A+M">A. M. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+E+S">E. S. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+L">Liang Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+L">Lin Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+L">Long Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+M+J">M. J. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+M+L">M. L. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Q+H">Q. H. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+S+H">S. H. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+S+Z">S. Z. Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+T+L">T. L. Chen</a> , et al. (254 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.01215v2-abstract-short" style="display: inline;"> The Water Cherenkov Detector Array (WCDA) is one of the components of Large High Altitude Air Shower Observatory (LHAASO) and can monitor any sources over two-thirds of the sky for up to 7 hours per day with &gt;98\% duty cycle. 
In this work, we report the detection of two outbursts of the Fanaroff-Riley I radio galaxy NGC 1275 that were detected by LHAASO-WCDA between November 2022 and January 2023&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01215v2-abstract-full').style.display = 'inline'; document.getElementById('2411.01215v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.01215v2-abstract-full" style="display: none;"> The Water Cherenkov Detector Array (WCDA) is one of the components of Large High Altitude Air Shower Observatory (LHAASO) and can monitor any sources over two-thirds of the sky for up to 7 hours per day with &gt;98\% duty cycle. In this work, we report the detection of two outbursts of the Fanaroff-Riley I radio galaxy NGC 1275 that were detected by LHAASO-WCDA between November 2022 and January 2023 with statistical significance of 5.2~$\sigma$ and 8.3~$\sigma$. The observed spectral energy distribution in the range from 500 GeV to 3 TeV is fitted by a power-law with a best-fit spectral index of $\alpha=-3.37\pm0.52$ and $-3.35\pm0.29$, respectively. The outburst flux above 0.5~TeV was ($4.55\pm 4.21)\times~10^{-11}~\rm cm^{-2}~s^{-1}$ and ($3.45\pm 1.78)\times~10^{-11}~\rm cm^{-2}~s^{-1}$, corresponding to 60\%, 45\% of Crab Nebula flux. Variation analysis reveals the variability time-scale of days at the TeV energy band. A simple test by one-zone synchrotron self-Compton model reproduces the data in the gamma-ray band well. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01215v2-abstract-full').style.display = 'none'; document.getElementById('2411.01215v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 8 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.01182">arXiv:2411.01182</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.01182">pdf</a>, <a href="https://arxiv.org/format/2411.01182">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Graph Cross-Correlated Network for Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+H">Hao Chen</a>, <a href="/search/?searchtype=author&amp;query=Bei%2C+Y">Yuanchen Bei</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenbing Huang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+S">Shengyuan Chen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+F">Feiran Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+X">Xiao Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2411.01182v1-abstract-short" style="display: inline;"> Collaborative filtering (CF) models have demonstrated remarkable performance in recommender systems, which represent users and items as embedding vectors. Recently, due to the powerful modeling capability of graph neural networks for user-item interaction graphs, graph-based CF models have gained increasing attention. They encode each user/item and its subgraph into a single super vector by combin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01182v1-abstract-full').style.display = 'inline'; document.getElementById('2411.01182v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.01182v1-abstract-full" style="display: none;"> Collaborative filtering (CF) models have demonstrated remarkable performance in recommender systems, which represent users and items as embedding vectors. Recently, due to the powerful modeling capability of graph neural networks for user-item interaction graphs, graph-based CF models have gained increasing attention. They encode each user/item and its subgraph into a single super vector by combining graph embeddings after each graph convolution. However, each hop of the neighbor in the user-item subgraphs carries a specific semantic meaning. Encoding all subgraph information into single vectors and inferring user-item relations with dot products can weaken the semantic information between user and item subgraphs, thus leaving untapped potential. Exploiting this untapped potential provides insight into improving performance for existing recommendation models. To this end, we propose the Graph Cross-correlated Network for Recommendation (GCR), which serves as a general recommendation paradigm that explicitly considers correlations between user/item subgraphs. 
GCR first introduces the Plain Graph Representation (PGR) to extract information directly from each hop of neighbors into corresponding PGR vectors. Then, GCR develops Cross-Correlated Aggregation (CCA) to construct possible cross-correlated terms between PGR vectors of user/item subgraphs. Finally, GCR comprehensively incorporates the cross-correlated terms for recommendations. Experimental results show that GCR outperforms state-of-the-art models on both interaction prediction and click-through rate prediction tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01182v1-abstract-full').style.display = 'none'; document.getElementById('2411.01182v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, accepted by TKDE</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.00663">arXiv:2411.00663</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.00663">pdf</a>, <a href="https://arxiv.org/ps/2411.00663">ps</a>, <a href="https://arxiv.org/format/2411.00663">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> </div> </div> <p class="title is-5 mathjax"> Ergodicity and Mixing of invariant capacities and applications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/?searchtype=author&amp;query=Feng%2C+C">Chunrong Feng</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wen Huang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+C">Chunlin Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+H">Huaizhong Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.00663v1-abstract-short" style="display: inline;"> We introduce the notion of common conditional expectation to investigate Birkhoff&#39;s ergodic theorem and subadditive ergodic theorem for invariant upper probabilities. If in addition, the upper probability is ergodic, we construct an invariant probability to characterize the limit of the ergodic mean. Moreover, this skeleton probability is the unique ergodic probability in the core of the upper pro&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00663v1-abstract-full').style.display = 'inline'; document.getElementById('2411.00663v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.00663v1-abstract-full" style="display: none;"> We introduce the notion of common conditional expectation to investigate Birkhoff&#39;s ergodic theorem and subadditive ergodic theorem for invariant upper probabilities. If in addition, the upper probability is ergodic, we construct an invariant probability to characterize the limit of the ergodic mean. Moreover, this skeleton probability is the unique ergodic probability in the core of the upper probability, that is equal to all probabilities in the core on all invariant sets. 
We have the following applications of these two theorems: $\bullet$ provide a strong law of large numbers for ergodic stationary sequence on upper probability spaces; $\bullet$ prove the multiplicative ergodic theorem on upper probability spaces; $\bullet$ establish a criterion for the ergodicity of upper probabilities in terms of independence. Furthermore, we introduce and study weak mixing for capacity preserving systems. Using the skeleton idea, we also provide several characterizations of weak mixing for invariant upper probabilities. Finally, we provide examples of ergodic and weakly mixing capacity preserving systems. As applications, we obtain new results in the classical ergodic theory. e.g. in characterizing dynamical properties on measure preserving systems, such as weak mixing, periodicity. Moreover, we use our results in the nonlinear theory to obtain the asymptotic independence, Birkhoff&#39;s type ergodic theorem, subadditive ergodic theorem, and multiplicative ergodic theorem for non-invariant probabilities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00663v1-abstract-full').style.display = 'none'; document.getElementById('2411.00663v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.23674">arXiv:2410.23674</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.23674">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> </div> </div> <p class="title is-5 mathjax"> Atom-light-correlated quantum interferometer with memory-induced phase comb </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenfeng Huang</a>, <a href="/search/?searchtype=author&amp;query=Liang%2C+X">Xinyun Liang</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+J">Jie Zhao</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Z">Zeliang Wu</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+K">Keye Zhang</a>, <a href="/search/?searchtype=author&amp;query=Yuan%2C+C">Chun-Hua Yuan</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+Y">Yuan Wu</a>, <a href="/search/?searchtype=author&amp;query=Fan%2C+B">Bixuan Fan</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+W">Weiping Zhang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+L">Liqing Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.23674v1-abstract-short" style="display: inline;"> Precise phase measurements by interferometers are crucial in science for detecting subtle changes, such as gravitational waves. However, phase sensitivity is typically limited by the standard quantum limit (SQL) with uncorrelated particles N. This limit can be surpassed using quantum correlations, but achieving high-quality correlations in large systems is challenging. 
Here, we propose and demonst&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23674v1-abstract-full').style.display = 'inline'; document.getElementById('2410.23674v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.23674v1-abstract-full" style="display: none;"> Precise phase measurements by interferometers are crucial in science for detecting subtle changes, such as gravitational waves. However, phase sensitivity is typically limited by the standard quantum limit (SQL) with uncorrelated particles N. This limit can be surpassed using quantum correlations, but achieving high-quality correlations in large systems is challenging. Here, we propose and demonstrate an atom-light hybrid quantum interferometry whose sensitivity is enhanced beyond the SQL with atom-light quantum correlation and newly developed phase comb superposition via atomic-memory-assisted multiple quantum amplification. Finally, a phase sensitivity beyond the SQL of up to $8.3\pm 0.2$ dB is achieved, especially at $N=4 \times10^{13}/s$, resulting in both atomic and optical phase sensitivities of $6\times10^{-8} rad/\sqrt{Hz}$. This technique can advance sensitive quantum measurements in various fields. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23674v1-abstract-full').style.display = 'none'; document.getElementById('2410.23674v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.23526">arXiv:2410.23526</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.23526">pdf</a>, <a href="https://arxiv.org/format/2410.23526">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> LEAF: Learning and Evaluation Augmented by Fact-Checking to Improve Factualness in Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Tran%2C+H">Hieu Tran</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+J">Junda Wang</a>, <a href="/search/?searchtype=author&amp;query=Ting%2C+Y">Yujan Ting</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Weijing Huang</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+T">Terrence Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.23526v1-abstract-short" style="display: inline;"> Large language models (LLMs) have shown remarkable capabilities in various natural language processing tasks, yet they often struggle with maintaining factual accuracy, particularly in knowledge-intensive domains like healthcare. 
This study introduces LEAF: Learning and Evaluation Augmented by Fact-Checking, a novel approach designed to enhance the factual reliability of LLMs, with a focus on medi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23526v1-abstract-full').style.display = 'inline'; document.getElementById('2410.23526v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.23526v1-abstract-full" style="display: none;"> Large language models (LLMs) have shown remarkable capabilities in various natural language processing tasks, yet they often struggle with maintaining factual accuracy, particularly in knowledge-intensive domains like healthcare. This study introduces LEAF: Learning and Evaluation Augmented by Fact-Checking, a novel approach designed to enhance the factual reliability of LLMs, with a focus on medical question answering (QA). LEAF utilizes a dual strategy to enhance the factual accuracy of responses from models such as Llama 3 70B Instruct and Llama 3 8B Instruct. The first strategy, Fact-Check-Then-RAG, improves Retrieval-Augmented Generation (RAG) by incorporating fact-checking results to guide the retrieval process without updating model parameters. The second strategy, Learning from Fact-Checks via Self-Training, involves supervised fine-tuning (SFT) on fact-checked responses or applying Simple Preference Optimization (SimPO) with fact-checking as a ranking mechanism, both updating LLM parameters from supervision. These findings suggest that integrating fact-checked responses whether through RAG enhancement or self-training enhances the reliability and factual correctness of LLM outputs, offering a promising solution for applications where information accuracy is crucial. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.23526v1-abstract-full').style.display = 'none'; document.getElementById('2410.23526v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.22973">arXiv:2410.22973</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.22973">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Atomic Physics">physics.atom-ph</span> </div> </div> <p class="title is-5 mathjax"> International comparison of optical frequencies with transportable optical lattice clocks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Clock%2C+I">International Clock</a>, <a href="/search/?searchtype=author&amp;query=Networking%2C+O">Oscillator Networking</a>, <a href="/search/?searchtype=author&amp;query=Collaboration"> Collaboration</a>, <a href="/search/?searchtype=author&amp;query=%3A"> :</a>, <a href="/search/?searchtype=author&amp;query=Amy-Klein%2C+A">Anne Amy-Klein</a>, <a href="/search/?searchtype=author&amp;query=Benkler%2C+E">Erik Benkler</a>, <a href="/search/?searchtype=author&amp;query=Blond%C3%A9%2C+P">Pascal Blondé</a>, <a href="/search/?searchtype=author&amp;query=Bongs%2C+K">Kai Bongs</a>, <a href="/search/?searchtype=author&amp;query=Cantin%2C+E">Etienne Cantin</a>, <a 
href="/search/?searchtype=author&amp;query=Chardonnet%2C+C">Christian Chardonnet</a>, <a href="/search/?searchtype=author&amp;query=Denker%2C+H">Heiner Denker</a>, <a href="/search/?searchtype=author&amp;query=D%C3%B6rscher%2C+S">Sören Dörscher</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+C">Chen-Hao Feng</a>, <a href="/search/?searchtype=author&amp;query=Gaudron%2C+J">Jacques-Olivier Gaudron</a>, <a href="/search/?searchtype=author&amp;query=Gill%2C+P">Patrick Gill</a>, <a href="/search/?searchtype=author&amp;query=Hill%2C+I+R">Ian R Hill</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wei Huang</a>, <a href="/search/?searchtype=author&amp;query=Johnson%2C+M+Y+H">Matthew Y H Johnson</a>, <a href="/search/?searchtype=author&amp;query=Kale%2C+Y+B">Yogeshwar B Kale</a>, <a href="/search/?searchtype=author&amp;query=Katori%2C+H">Hidetoshi Katori</a>, <a href="/search/?searchtype=author&amp;query=Klose%2C+J">Joshua Klose</a>, <a href="/search/?searchtype=author&amp;query=Kronj%C3%A4ger%2C+J">Jochen Kronjäger</a>, <a href="/search/?searchtype=author&amp;query=Kuhl%2C+A">Alexander Kuhl</a>, <a href="/search/?searchtype=author&amp;query=Targat%2C+R+L">Rodolphe Le Targat</a>, <a href="/search/?searchtype=author&amp;query=Lisdat%2C+C">Christian Lisdat</a> , et al. (15 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.22973v1-abstract-short" style="display: inline;"> Optical clocks have improved their frequency stability and estimated accuracy by more than two orders of magnitude over the best caesium microwave clocks that realise the SI second. Accordingly, an optical redefinition of the second has been widely discussed, prompting a need for the consistency of optical clocks to be verified worldwide. 
While satellite frequency links are sufficient to compare m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22973v1-abstract-full').style.display = 'inline'; document.getElementById('2410.22973v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.22973v1-abstract-full" style="display: none;"> Optical clocks have improved their frequency stability and estimated accuracy by more than two orders of magnitude over the best caesium microwave clocks that realise the SI second. Accordingly, an optical redefinition of the second has been widely discussed, prompting a need for the consistency of optical clocks to be verified worldwide. While satellite frequency links are sufficient to compare microwave clocks, a suitable method for comparing high-performance optical clocks over intercontinental distances is missing. Furthermore, remote comparisons over frequency links face fractional uncertainties of a few $10^{-18}$ due to imprecise knowledge of each clock&#39;s relativistic redshift, which stems from uncertainty in the geopotential determined at each distant location. Here, we report a landmark campaign towards the era of optical clocks, where, for the first time, state-of-the-art transportable optical clocks from Japan and Europe are brought together to demonstrate international comparisons that require neither a high-performance frequency link nor information on the geopotential difference between remote sites. Conversely, the reproducibility of the clocks after being transported between countries was sufficient to determine geopotential height offsets at the level of 4 cm. Our campaign paves the way for redefining the SI second and has a significant impact on various applications, including tests of general relativity, geodetic sensing for geosciences, precise navigation, and future timing networks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22973v1-abstract-full').style.display = 'none'; document.getElementById('2410.22973v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">29 pages, 5 figures</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=50" class="pagination-next">Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=0" class="pagination-link is-current" aria-label="Page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=50" class="pagination-link" aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=100" class="pagination-link" aria-label="Page 3">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=150" class="pagination-link" aria-label="Page 4">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Huang%2C+W&amp;start=200" class="pagination-link" aria-label="Page 5">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 
released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> 
</div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 
47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10