Search | arXiv e-print repository

Showing 1–50 of 6,331 results for author: Zhou, Y
Sorted by: announcement date (newest first). Results per page: 50.
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.18309">arXiv:2411.18309</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.18309">pdf</a>, <a href="https://arxiv.org/format/2411.18309">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MvKeTR: Chest CT Report Generation with Multi-View Perception and Knowledge Enhancement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Deng%2C+X">Xiwei Deng</a>, <a href="/search/?searchtype=author&amp;query=He%2C+X">Xianchun He</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yudan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+S">Shuhui Cai</a>, <a href="/search/?searchtype=author&amp;query=Cai%2C+C">Congbo Cai</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Z">Zhong Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.18309v1-abstract-short" style="display: inline;"> CT report generation (CTRG) aims to automatically generate diagnostic reports for 3D volumes, relieving clinicians&#39; workload and improving patient care. 
Despite clinical value, existing works fail to effectively incorporate diagnostic information from multiple anatomical views and lack related clinical expertise essential for accurate and reliable diagnosis. To resolve these limitations, we propose a novel Multi-view perception Knowledge-enhanced Transformer (MvKeTR) to mimic the diagnostic workflow of clinicians. Just as radiologists first examine CT scans from multiple planes, a Multi-View Perception Aggregator (MVPA) with view-aware attention effectively synthesizes diagnostic information from multiple anatomical views. Then, inspired by how radiologists further refer to relevant clinical records to guide diagnostic decision-making, a Cross-Modal Knowledge Enhancer (CMKE) retrieves the most similar reports based on the query volume to incorporate domain knowledge into the diagnosis procedure. Furthermore, instead of traditional MLPs, we employ Kolmogorov-Arnold Networks (KANs) with learnable nonlinear activation functions as the fundamental building blocks of both modules to better capture intricate diagnostic patterns in CT interpretation. Extensive experiments on the public CTRG-Chest-548K dataset demonstrate that our method outpaces prior state-of-the-art models across all metrics.
Submitted 27 November, 2024; originally announced November 2024.
Comments: 10 pages, 10 figures
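For intuition about the KAN-style building block mentioned in this abstract, here is a minimal sketch of a layer whose edge nonlinearities are learnable mixtures of fixed basis functions. It is a generic approximation of the Kolmogorov-Arnold idea under simplifying assumptions; the class and parameter names (BasisKANLayer, num_basis) are hypothetical and this is not the MvKeTR implementation.

```python
# Minimal sketch of a KAN-style layer: each input feature passes through a
# learnable nonlinearity (a weighted mix of fixed RBF basis functions) before
# being summed per output unit. Hypothetical simplification, not MvKeTR code.
import torch
import torch.nn as nn

class BasisKANLayer(nn.Module):
    def __init__(self, in_dim: int, out_dim: int, num_basis: int = 8):
        super().__init__()
        # Fixed RBF centers spread over a nominal input range [-2, 2].
        self.register_buffer("centers", torch.linspace(-2.0, 2.0, num_basis))
        # One learnable coefficient per (output, input, basis) edge function.
        self.coeff = nn.Parameter(torch.randn(out_dim, in_dim, num_basis) * 0.1)
        self.base = nn.Linear(in_dim, out_dim)  # residual linear path on a SiLU base

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, in_dim) -> RBF features: (batch, in_dim, num_basis)
        phi = torch.exp(-((x.unsqueeze(-1) - self.centers) ** 2))
        # Learnable per-edge nonlinearity, summed over inputs and basis functions.
        spline = torch.einsum("bik,oik->bo", phi, self.coeff)
        return self.base(torch.nn.functional.silu(x)) + spline

if __name__ == "__main__":
    layer = BasisKANLayer(in_dim=16, out_dim=4)
    print(layer(torch.randn(2, 16)).shape)  # torch.Size([2, 4])
```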
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.18301">arXiv:2411.18301</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.18301">pdf</a>, <a href="https://arxiv.org/format/2411.18301">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Enhancing MMDiT-Based Text-to-Image Models for Similar Subject Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wei%2C+T">Tianyi Wei</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+D">Dongdong Chen</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yifan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Pan%2C+X">Xingang Pan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.18301v1-abstract-short" style="display: inline;"> Representing the cutting-edge technique of text-to-image models, the latest Multimodal Diffusion Transformer (MMDiT) largely mitigates many generation issues existing in previous models. However, we discover that it still suffers from subject neglect or mixing when the input text prompt contains multiple subjects of similar semantics or appearance. We identify three possible ambiguities within the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.18301v1-abstract-full').style.display = 'inline'; document.getElementById('2411.18301v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.18301v1-abstract-full" style="display: none;"> Representing the cutting-edge technique of text-to-image models, the latest Multimodal Diffusion Transformer (MMDiT) largely mitigates many generation issues existing in previous models. However, we discover that it still suffers from subject neglect or mixing when the input text prompt contains multiple subjects of similar semantics or appearance. We identify three possible ambiguities within the MMDiT architecture that cause this problem: Inter-block Ambiguity, Text Encoder Ambiguity, and Semantic Ambiguity. To address these issues, we propose to repair the ambiguous latent on-the-fly by test-time optimization at early denoising steps. In detail, we design three loss functions: Block Alignment Loss, Text Encoder Alignment Loss, and Overlap Loss, each tailored to mitigate these ambiguities. Despite significant improvements, we observe that semantic ambiguity persists when generating multiple similar subjects, as the guidance provided by overlap loss is not explicit enough. Therefore, we further propose Overlap Online Detection and Back-to-Start Sampling Strategy to alleviate the problem. Experimental results on a newly constructed challenging dataset of similar subjects validate the effectiveness of our approach, showing superior generation quality and much higher success rates over existing methods. Our code will be available at https://github.com/wtybest/EnMMDiT. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.18301v1-abstract-full').style.display = 'none'; document.getElementById('2411.18301v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.18290">arXiv:2411.18290</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.18290">pdf</a>, <a href="https://arxiv.org/format/2411.18290">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Leveraging Semantic Asymmetry for Precise Gross Tumor Volume Segmentation of Nasopharyngeal Carcinoma in Planning CT </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+Z">Zi Li</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Ying Chen</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Z">Zeli Chen</a>, <a href="/search/?searchtype=author&amp;query=Su%2C+Y">Yanzhou Su</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+T">Tai Ma</a>, <a href="/search/?searchtype=author&amp;query=Mok%2C+T+C+W">Tony C. W. Mok</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yan-Jie Zhou</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Yunhai Bai</a>, <a href="/search/?searchtype=author&amp;query=Zheng%2C+Z">Zhinlin Zheng</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+L">Le Lu</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Y">Yirui Wang</a>, <a href="/search/?searchtype=author&amp;query=Ge%2C+J">Jia Ge</a>, <a href="/search/?searchtype=author&amp;query=Ye%2C+X">Xianghua Ye</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+S">Senxiang Yan</a>, <a href="/search/?searchtype=author&amp;query=Jin%2C+D">Dakai Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.18290v1-abstract-short" style="display: inline;"> In the radiation therapy of nasopharyngeal carcinoma (NPC), clinicians typically delineate the gross tumor volume (GTV) using non-contrast planning computed tomography to ensure accurate radiation dose delivery. However, the low contrast between tumors and adjacent normal tissues necessitates that radiation oncologists manually delineate the tumors, often relying on diagnostic MRI for guidance. %&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.18290v1-abstract-full').style.display = 'inline'; document.getElementById('2411.18290v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.18290v1-abstract-full" style="display: none;"> In the radiation therapy of nasopharyngeal carcinoma (NPC), clinicians typically delineate the gross tumor volume (GTV) using non-contrast planning computed tomography to ensure accurate radiation dose delivery. 
However, the low contrast between tumors and adjacent normal tissues necessitates that radiation oncologists manually delineate the tumors, often relying on diagnostic MRI for guidance. In this study, we propose a novel approach to directly segment NPC gross tumors on non-contrast planning CT images, circumventing potential registration errors when aligning MRI or MRI-derived tumor masks to planning CT. To address the low contrast issues between tumors and adjacent normal structures in planning CT, we introduce a 3D Semantic Asymmetry Tumor segmentation (SATs) method. Specifically, we posit that a healthy nasopharyngeal region is characteristically bilaterally symmetric, whereas the emergence of nasopharyngeal carcinoma disrupts this symmetry. Then, we propose a Siamese contrastive learning segmentation framework that minimizes the voxel-wise distance between original and flipped areas without tumor and encourages a larger distance between original and flipped areas with tumor. Thus, our approach enhances the sensitivity of features to semantic asymmetries. Extensive experiments demonstrate that the proposed SATs achieves the leading NPC GTV segmentation performance in both internal and external testing, e.g., with at least 2% absolute Dice score improvement and 12% average distance error reduction when compared to other state-of-the-art methods in the external testing.
Submitted 27 November, 2024; originally announced November 2024.
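A toy version of the symmetry-contrast idea described above might look like the following: features of the original volume and its left-right flipped copy are pulled together in voxels labeled non-tumor and pushed apart, up to a margin, in tumor voxels. The function name, margin value, and tensor layout are assumptions for illustration, not the SATs implementation.

```python
# Toy symmetry-contrast loss: pull original/flipped features together outside
# the tumor, push them apart (hinge with margin) inside the tumor.
# Illustrative sketch only; not the SATs code.
import torch

def symmetry_contrast_loss(feat: torch.Tensor, tumor_mask: torch.Tensor,
                           margin: float = 1.0) -> torch.Tensor:
    # feat: (C, D, H, W) volume features; tumor_mask: (D, H, W) with values in {0, 1}.
    feat_flipped = torch.flip(feat, dims=[-1])               # mirror across left-right axis
    dist = (feat - feat_flipped).pow(2).mean(dim=0)          # voxel-wise feature distance
    pull = (dist * (1 - tumor_mask)).mean()                  # symmetric regions: small distance
    push = (torch.relu(margin - dist) * tumor_mask).mean()   # tumor regions: distance >= margin
    return pull + push

if __name__ == "__main__":
    f = torch.randn(8, 16, 32, 32, requires_grad=True)
    m = (torch.rand(16, 32, 32) > 0.9).float()
    symmetry_contrast_loss(f, m).backward()
```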

arXiv:2411.17949 [pdf, other] cs.CV
ROICtrl: Boosting Instance Control for Visual Generation
Authors: Yuchao Gu, Yipin Zhou, Yunfan Ye, Yixin Nie, Licheng Yu, Pingchuan Ma, Kevin Qinghong Lin, Mike Zheng Shou
Abstract: Natural language often struggles to accurately associate positional and attribute information with multiple instances, which limits current text-based visual generation models to simpler compositions featuring only a few dominant instances. To address this limitation, this work enhances diffusion models by introducing regional instance control, where each instance is governed by a bounding box paired with a free-form caption. Previous methods in this area typically rely on implicit position encoding or explicit attention masks to separate regions of interest (ROIs), resulting in either inaccurate coordinate injection or large computational overhead. Inspired by ROI-Align in object detection, we introduce a complementary operation called ROI-Unpool. Together, ROI-Align and ROI-Unpool enable explicit, efficient, and accurate ROI manipulation on high-resolution feature maps for visual generation. Building on ROI-Unpool, we propose ROICtrl, an adapter for pretrained diffusion models that enables precise regional instance control. ROICtrl is compatible with community-finetuned diffusion models, as well as with existing spatial-based add-ons (e.g., ControlNet, T2I-Adapter) and embedding-based add-ons (e.g., IP-Adapter, ED-LoRA), extending their applications to multi-instance generation. Experiments show that ROICtrl achieves superior performance in regional instance control while significantly reducing computational costs.
Submitted 26 November, 2024; originally announced November 2024.
Comments: Project page at https://roictrl.github.io/
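To illustrate the ROI-Align/ROI-Unpool pairing at a high level, the sketch below crops a box from a feature map at a fixed resolution and then writes a processed ROI feature back into the full-resolution map at the same location. It is a simplified stand-in built from standard bilinear interpolation, with hypothetical names (roi_crop, roi_unpool); the paper's actual ROI-Unpool operator presumably handles sub-pixel alignment more carefully.

```python
# Simplified ROI crop ("align") and paste-back ("unpool") using bilinear
# interpolation. Illustrative only; not the ROICtrl implementation.
import torch
import torch.nn.functional as F

def roi_crop(feat: torch.Tensor, box: tuple, out_size: int = 16) -> torch.Tensor:
    # feat: (1, C, H, W); box: (x0, y0, x1, y1) in integer pixel coordinates.
    x0, y0, x1, y1 = box
    roi = feat[:, :, y0:y1, x0:x1]
    return F.interpolate(roi, size=(out_size, out_size),
                         mode="bilinear", align_corners=False)

def roi_unpool(feat: torch.Tensor, roi_feat: torch.Tensor, box: tuple) -> torch.Tensor:
    # Resize the fixed-size ROI feature back to the box shape and paste it in place.
    x0, y0, x1, y1 = box
    resized = F.interpolate(roi_feat, size=(y1 - y0, x1 - x0),
                            mode="bilinear", align_corners=False)
    out = feat.clone()
    out[:, :, y0:y1, x0:x1] = resized
    return out

if __name__ == "__main__":
    fmap = torch.randn(1, 8, 64, 64)
    box = (10, 20, 42, 52)
    roi = roi_crop(fmap, box)                 # (1, 8, 16, 16) per-instance feature
    fmap2 = roi_unpool(fmap, roi * 2.0, box)  # write the processed ROI back
    print(roi.shape, fmap2.shape)
```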

arXiv:2411.17864 [pdf, other] cs.CV
Generative Image Layer Decomposition with Visual Effects
Authors: Jinrui Yang, Qing Liu, Yijun Li, Soo Ye Kim, Daniil Pakhomov, Mengwei Ren, Jianming Zhang, Zhe Lin, Cihang Xie, Yuyin Zhou
Abstract: Recent advancements in large generative models, particularly diffusion-based methods, have significantly enhanced the capabilities of image editing. However, achieving precise control over image composition tasks remains a challenge.
Layered representations, which allow for independent editing of image components, are essential for user-driven content creation, yet existing approaches often struggle to decompose images into plausible layers with accurately retained transparent visual effects such as shadows and reflections. We propose LayerDecomp, a generative framework for image layer decomposition which outputs photorealistic clean backgrounds and high-quality transparent foregrounds with faithfully preserved visual effects. To enable effective training, we first introduce a dataset preparation pipeline that automatically scales up simulated multi-layer data with synthesized visual effects. To further enhance real-world applicability, we supplement this simulated dataset with camera-captured images containing natural visual effects. Additionally, we propose a consistency loss which enforces the model to learn accurate representations for the transparent foreground layer when ground-truth annotations are not available. Our method achieves superior quality in layer decomposition, outperforming existing approaches in object removal and spatial editing tasks across several benchmarks and multiple user studies, unlocking various creative possibilities for layer-wise image editing. The project page is https://rayjryang.github.io/LayerDecomp.
Submitted 26 November, 2024; originally announced November 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The project page: https://rayjryang.github.io/LayerDecomp</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.17837">arXiv:2411.17837</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.17837">pdf</a>, <a href="https://arxiv.org/format/2411.17837">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> OracleSage: Towards Unified Visual-Linguistic Understanding of Oracle Bone Scripts through Cross-Modal Knowledge Fusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Jiang%2C+H">Hanqi Jiang</a>, <a href="/search/?searchtype=author&amp;query=Pan%2C+Y">Yi Pan</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+J">Junhao Chen</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Z">Zhengliang Liu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yifan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Shu%2C+P">Peng Shu</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+Y">Yiwei Li</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+H">Huaqin Zhao</a>, <a href="/search/?searchtype=author&amp;query=Mihm%2C+S">Stephen Mihm</a>, <a href="/search/?searchtype=author&amp;query=Howe%2C+L+C">Lewis C Howe</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+T">Tianming Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.17837v1-abstract-short" style="display: inline;"> Oracle bone script (OBS), as China&#39;s earliest mature writing system, present significant challenges in automatic recognition due to their complex pictographic structures and divergence from modern Chinese characters. We introduce OracleSage, a novel cross-modal framework that integrates hierarchical visual understanding with graph-based semantic reasoning. Specifically, we propose (1) a Hierarchic&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17837v1-abstract-full').style.display = 'inline'; document.getElementById('2411.17837v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.17837v1-abstract-full" style="display: none;"> Oracle bone script (OBS), as China&#39;s earliest mature writing system, present significant challenges in automatic recognition due to their complex pictographic structures and divergence from modern Chinese characters. We introduce OracleSage, a novel cross-modal framework that integrates hierarchical visual understanding with graph-based semantic reasoning. Specifically, we propose (1) a Hierarchical Visual-Semantic Understanding module that enables multi-granularity feature extraction through progressive fine-tuning of LLaVA&#39;s visual backbone, (2) a Graph-based Semantic Reasoning Framework that captures relationships between visual components and semantic concepts through dynamic message passing, and (3) OracleSem, a semantically enriched OBS dataset with comprehensive pictographic and semantic annotations. 
Experimental results demonstrate that OracleSage significantly outperforms state-of-the-art vision-language models. This research establishes a new paradigm for ancient text interpretation while providing valuable technical support for archaeological studies.
Submitted 26 November, 2024; originally announced November 2024.

arXiv:2411.17616 [pdf, other] cs.CV
Accelerating Vision Diffusion Transformers with Skip Branches
Authors: Guanjie Chen, Xinyu Zhao, Yucheng Zhou, Tianlong Chen, Cheng Yu
Abstract: Diffusion Transformers (DiT), an emerging image and video generation model architecture, has demonstrated great potential because of its high generation quality and scalability properties. Despite the impressive performance, its practical deployment is constrained by computational complexity and redundancy in the sequential denoising process. While feature caching across timesteps has proven effective in accelerating diffusion models, its application to DiT is limited by fundamental architectural differences from U-Net-based approaches. Through empirical analysis of DiT feature dynamics, we identify that significant feature variation between DiT blocks presents a key challenge for feature reusability. To address this, we convert standard DiT into Skip-DiT with skip branches to enhance feature smoothness. Further, we introduce Skip-Cache, which utilizes the skip branches to cache DiT features across timesteps at inference time. We validate the effectiveness of our proposal on different DiT backbones for video and image generation, showing that skip branches help preserve generation quality and achieve higher speedup. Experimental results indicate that Skip-DiT achieves a 1.5x speedup almost for free and a 2.2x speedup with only a minor reduction in quantitative metrics. Code is available at https://github.com/OpenSparseLLMs/Skip-DiT.git.
Submitted 26 November, 2024; originally announced November 2024.
Comments: 17 pages, 8 figures
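As a rough sketch of timestep feature caching with a skip connection, the loop below recomputes the expensive deep blocks only every few denoising steps and otherwise reuses the cached deep features, merging them with the cheap shallow features through a skip branch. The module names and cache interval are invented for illustration; this is not the Skip-DiT/Skip-Cache code.

```python
# Toy illustration of caching deep-block features across denoising timesteps
# and reusing them through a skip branch. Names and interval are hypothetical.
import torch
import torch.nn as nn

shallow = nn.Linear(64, 64)                                     # cheap blocks, run every step
deep = nn.Sequential(*[nn.Linear(64, 64) for _ in range(12)])   # expensive blocks
skip_merge = nn.Linear(128, 64)   # skip branch: fuse shallow + cached deep features

def denoise(x: torch.Tensor, num_steps: int = 50, cache_interval: int = 5) -> torch.Tensor:
    cached_deep = None
    for t in range(num_steps):
        h = shallow(x)
        if cached_deep is None or t % cache_interval == 0:
            cached_deep = deep(h)              # full forward only on refresh steps
        # Other steps reuse the cached deep features via the skip branch.
        x = skip_merge(torch.cat([h, cached_deep], dim=-1))
    return x

if __name__ == "__main__":
    print(denoise(torch.randn(2, 64)).shape)  # torch.Size([2, 64])
```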

arXiv:2411.17125 [pdf, other] cs.CV cs.AI
DOGE: Towards Versatile Visual Document Grounding and Referring
Authors: Yinan Zhou, Yuxin Chen, Haokun Lin, Shuyu Yang, Li Zhu, Zhongang Qi, Chen Ma, Ying Shan
Abstract: In recent years, Multimodal Large Language Models (MLLMs) have increasingly emphasized grounding and referring capabilities to achieve detailed understanding and flexible user interaction. However, in the realm of visual document understanding, these capabilities lag behind due to the scarcity of fine-grained datasets and comprehensive benchmarks.
To fill this gap, we propose the DOcument Grounding and Eferring data engine (DOGE-Engine), which produces two types of high-quality fine-grained document data: multi-granular parsing data for enhancing fundamental text localization and recognition capabilities; and instruction-tuning data to activate MLLM's grounding and referring capabilities during dialogue and reasoning. Additionally, using our engine, we construct DOGE-Bench, which encompasses 7 grounding and referring tasks across 3 document types (chart, poster, PDF document), providing comprehensive evaluations for fine-grained document understanding. Furthermore, leveraging the data generated by our engine, we develop a strong baseline model, DOGE. This pioneering MLLM is capable of accurately referring and grounding texts at multiple granularities within document images. Our code, data, and model will be open-sourced for community development.
Submitted 26 November, 2024; originally announced November 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 13 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16937">arXiv:2411.16937</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16937">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Traffic Wave Properties for Automated Vehicles During Traffic Oscillations via Analytical Approximations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yang Zhou</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+S">Sixu Li</a>, <a href="/search/?searchtype=author&amp;query=Kontar%2C+W">Wissam Kontar</a>, <a href="/search/?searchtype=author&amp;query=Pu%2C+F">Fan Pu</a>, <a href="/search/?searchtype=author&amp;query=Srivastava%2C+A">Anupam Srivastava</a>, <a href="/search/?searchtype=author&amp;query=Ahn%2C+S">Soyoung Ahn</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16937v1-abstract-short" style="display: inline;"> This paper presents an analytical approximation framework to understand the dynamics of traffic wave propagation for Automated Vehicles (AVs) during traffic oscillations. The framework systematically unravels the intricate relationships between the longitudinal control model of the AVs and the properties of traffic waves. We apply Laplacian Transformation and Describing Function Analysis to mathem&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16937v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16937v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16937v1-abstract-full" style="display: none;"> This paper presents an analytical approximation framework to understand the dynamics of traffic wave propagation for Automated Vehicles (AVs) during traffic oscillations. The framework systematically unravels the intricate relationships between the longitudinal control model of the AVs and the properties of traffic waves. We apply Laplacian Transformation and Describing Function Analysis to mathematically derive the traffic wave properties of an AV in car-following scenarios. Further, we incorporate Newell&#39;s car-following model to determine the speed of the traffic waves. Our analysis extends to both homogenous and heterogenous traffic, systematically handling intra-heterogeneities and inter-heterogeneities in traffic wave propagation using the established analytical framework. We validate our approach via numerical simulations and show the connections between the AV control system and traffic wave properties. This research emphasizes the importance of rethinking our understanding of traffic wave properties when AVs are present in the traffic system. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16937v1-abstract-full').style.display = 'none'; document.getElementById('2411.16937v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16686">arXiv:2411.16686</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16686">pdf</a>, <a href="https://arxiv.org/format/2411.16686">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> ProteinWeaver: A Divide-and-Assembly Approach for Protein Backbone Design </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ma%2C+Y">Yiming Ma</a>, <a href="/search/?searchtype=author&amp;query=Ye%2C+F">Fei Ye</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yi Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zheng%2C+Z">Zaixiang Zheng</a>, <a href="/search/?searchtype=author&amp;query=Xue%2C+D">Dongyu Xue</a>, <a href="/search/?searchtype=author&amp;query=Gu%2C+Q">Quanquan Gu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16686v2-abstract-short" style="display: inline;"> Nature creates diverse proteins through a &#39;divide and assembly&#39; strategy. Inspired by this idea, we introduce ProteinWeaver, a two-stage framework for protein backbone design. Our method first generates individual protein domains and then employs an SE(3) diffusion model to flexibly assemble these domains. A key challenge lies in the assembling step, given the complex and rugged nature of the inte&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16686v2-abstract-full').style.display = 'inline'; document.getElementById('2411.16686v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16686v2-abstract-full" style="display: none;"> Nature creates diverse proteins through a &#39;divide and assembly&#39; strategy. Inspired by this idea, we introduce ProteinWeaver, a two-stage framework for protein backbone design. Our method first generates individual protein domains and then employs an SE(3) diffusion model to flexibly assemble these domains. A key challenge lies in the assembling step, given the complex and rugged nature of the inter-domain interaction landscape. To address this challenge, we employ preference alignment to discern complex relationships between structure and interaction landscapes through comparative analysis of generated samples. 
Comprehensive experiments demonstrate that ProteinWeaver: (1) generates high-quality, novel protein backbones through versatile domain assembly; (2) outperforms RFdiffusion, the current state-of-the-art in backbone design, by 13% and 39% for long-chain proteins; (3) shows the potential for cooperative function design through illustrative case studies. To sum up, by introducing a 'divide-and-assembly' paradigm, ProteinWeaver advances protein engineering and opens new avenues for functional protein design.
Submitted 27 November, 2024; v1 submitted 8 November, 2024; originally announced November 2024.
Comments: 19 pages, 10 figures, 3 tables

arXiv:2411.16577 [pdf, other] astro-ph.EP astro-ph.SR
The JWST Weather Report from the Isolated Exoplanet Analog SIMP 0136+0933: Pressure-Dependent Variability Driven by Multiple Mechanisms
Authors: Allison M. McCarthy, Johanna M. Vos, Philip S. Muirhead, Beth A. Biller, Caroline V. Morley, Jacqueline Faherty, Ben Burningham, Emily Calamari, Nicolas B. Cowan,
Kelle L. Cruz, Eileen Gonzales, Mary Anne Limbach, Pengyu Liu, Evert Nasedkin, Genaro Suarez, Xianyu Tan, Cian O'Toole, Channon Visscher, Niall Whiteford, Yifan Zhou
Abstract: Isolated planetary-mass objects share their mass range with planets but do not orbit a star. They lack the necessary mass to support fusion in their cores and thermally radiate their heat from formation as they cool, primarily at infrared wavelengths. Many isolated planetary-mass objects show variations in their infrared brightness consistent with non-uniform atmospheric features modulated by their rotation. SIMP J013656.5+093347.3 is a rapidly rotating isolated planetary-mass object, and previous infrared monitoring suggests complex atmospheric features rotating in and out of view. The physical nature of these features is not well understood, with clouds, temperature variations, thermochemical instabilities, and infrared-emitting aurorae all proposed as contributing mechanisms. Here we report JWST time-resolved low-resolution spectroscopy from 0.8 to 11 micron of SIMP J013656.5+093347.3, which supports the presence of three specific features in the atmosphere: clouds, hot spots, and changing carbon chemistry. We show that no single mechanism can explain the variations in the time-resolved spectra. When combined with previous studies of this object indicating patchy clouds and aurorae, these measurements reveal the rich complexity of the atmosphere of SIMP J013656.5+093347.3. Gas giant planets in the solar system, specifically Jupiter and Saturn, also have multiple cloud layers and high-altitude hot spots, suggesting these phenomena are also present in worlds both within and beyond our solar system.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16577v1-abstract-full').style.display = 'none'; document.getElementById('2411.16577v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ApJ Letters</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16415">arXiv:2411.16415</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16415">pdf</a>, <a href="https://arxiv.org/format/2411.16415">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Solar and Stellar Astrophysics">astro-ph.SR</span> </div> </div> <p class="title is-5 mathjax"> Frozen-field Modeling of Coronal Condensations with MPI-AMRVAC II: Optimization and application in three-dimensional models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuhao Zhou</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+X">Xiaohong Li</a>, <a href="/search/?searchtype=author&amp;query=Jenkins%2C+J+M">Jack M. Jenkins</a>, <a href="/search/?searchtype=author&amp;query=Hong%2C+J">Jie Hong</a>, <a href="/search/?searchtype=author&amp;query=Keppens%2C+R">Rony Keppens</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16415v1-abstract-short" style="display: inline;"> The frozen-field hydrodynamic (ffHD) model is a simplification of the full magnetohydrodynamical (MHD) equations under the assumption of a rigid magnetic field, which significantly reduces computational complexity and enhances efficiency. In this work, we combine the ffHD prescription with hyperbolic thermal conduction (TC) and the Transition Region Adaptive Conduction (TRAC) method to achieve fur&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16415v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16415v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16415v1-abstract-full" style="display: none;"> The frozen-field hydrodynamic (ffHD) model is a simplification of the full magnetohydrodynamical (MHD) equations under the assumption of a rigid magnetic field, which significantly reduces computational complexity and enhances efficiency. In this work, we combine the ffHD prescription with hyperbolic thermal conduction (TC) and the Transition Region Adaptive Conduction (TRAC) method to achieve further optimization. A series of two-dimensional tests are done to evaluate the performance of the hyperbolic TC and the TRAC method. The results indicate that hyperbolic TC, while showing limiter-affected numerical dissipation, delivers outcomes comparable to classic parabolic TC. 
The TRAC method effectively compensates for the underestimation of enthalpy flux in low-resolution simulations, as evaluated on tests that demonstrate prominence formation. We present an application of the ffHD model that forms a three-dimensional prominence embedded in a magnetic flux rope, which develops into a stable slab-like filament. The simulation reveals a prominence with an elongated spine and a width consistent with observations, highlighting the potential of the ffHD model in capturing the dynamics of solar prominences. Forward modeling of the simulation data produces synthetic images at various wavelengths, providing insights into the appearance of prominences and filaments in different observational contexts. The ffHD model, with its computational efficiency and the demonstrated capability to simulate complex solar phenomena, offers a valuable tool for solar physicists, and is implemented in the open-source MPI-AMRVAC framework. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16415v1-abstract-full').style.display = 'none'; document.getElementById('2411.16415v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in ApJ. 31 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16300">arXiv:2411.16300</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16300">pdf</a>, <a href="https://arxiv.org/format/2411.16300">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> BayLing 2: A Multilingual Large Language Model with Efficient Language Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhang%2C+S">Shaolei Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+K">Kehao Zhang</a>, <a href="/search/?searchtype=author&amp;query=Fang%2C+Q">Qingkai Fang</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+S">Shoutao Guo</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+X">Xiaodong Liu</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+Y">Yang Feng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16300v1-abstract-short" style="display: inline;"> Large language models (LLMs), with their powerful generative capabilities and vast knowledge, empower various tasks in everyday life. However, these abilities are primarily concentrated in high-resource languages, leaving low-resource languages with weaker generative capabilities and relatively limited knowledge. 
Enhancing the multilingual capabilities of LLMs is therefore crucial for serving over&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16300v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16300v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16300v1-abstract-full" style="display: none;"> Large language models (LLMs), with their powerful generative capabilities and vast knowledge, empower various tasks in everyday life. However, these abilities are primarily concentrated in high-resource languages, leaving low-resource languages with weaker generative capabilities and relatively limited knowledge. Enhancing the multilingual capabilities of LLMs is therefore crucial for serving over 100 linguistic communities worldwide. An intuitive approach to enhance the multilingual capabilities would be to construct instruction data for various languages, but constructing instruction data for over 100 languages is prohibitively costly. In this paper, we introduce BayLing 2, which efficiently transfers generative capabilities and knowledge from high-resource languages to low-resource languages through language alignment. To achieve this, we constructed a dataset of 3.2 million instructions, comprising high-resource language instructions (Chinese and English) and cross-lingual instructions for 100+ languages, and performed instruction tuning based on the dataset to facilitate the capability transfer between languages. Using Llama as the foundation model, we developed BayLing-2-7B, BayLing-2-13B, and BayLing-3-8B, and conducted a comprehensive evaluation of BayLing. For multilingual translation across 100+ languages, BayLing shows superior performance compared to open-source models of similar scale. For multilingual knowledge and understanding benchmarks, BayLing achieves significant improvements across over 20 low-resource languages, demonstrating its capability of effective knowledge transfer from high-resource to low-resource languages. Furthermore, results on English benchmarks indicate that BayLing maintains high performance in high-resource languages while enhancing the performance in low-resource languages. Demo, homepage, code and models of BayLing are available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16300v1-abstract-full').style.display = 'none'; document.getElementById('2411.16300v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">BayLing 2&#39;s online demo: http://nlp.ict.ac.cn/bayling/demo. 
BayLing 2&#39;s code and models: https://github.com/ictnlp/BayLing</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16170">arXiv:2411.16170</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16170">pdf</a>, <a href="https://arxiv.org/format/2411.16170">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> CARE Transformer: Mobile-Friendly Linear Visual Transformer via Decoupled Dual Interaction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+Q">Qingshan Xu</a>, <a href="/search/?searchtype=author&amp;query=Cui%2C+J">Jiequan Cui</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+J">Junbao Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jing Zhang</a>, <a href="/search/?searchtype=author&amp;query=Hong%2C+R">Richang Hong</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+H">Hanwang Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16170v1-abstract-short" style="display: inline;"> Recently, large efforts have been made to design efficient linear-complexity visual Transformers. However, current linear attention models are generally unsuitable to be deployed in resource-constrained mobile devices, due to suffering from either few efficiency gains or significant accuracy drops. In this paper, we propose a new de\textbf{C}oupled du\textbf{A}l-interactive linea\textbf{R} att\tex&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16170v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16170v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16170v1-abstract-full" style="display: none;"> Recently, large efforts have been made to design efficient linear-complexity visual Transformers. However, current linear attention models are generally unsuitable to be deployed in resource-constrained mobile devices, due to suffering from either few efficiency gains or significant accuracy drops. In this paper, we propose a new de\textbf{C}oupled du\textbf{A}l-interactive linea\textbf{R} att\textbf{E}ntion (CARE) mechanism, revealing that features&#39; decoupling and interaction can fully unleash the power of linear attention. We first propose an asymmetrical feature decoupling strategy that asymmetrically decouples the learning process for local inductive bias and long-range dependencies, thereby preserving sufficient local and global information while effectively enhancing the efficiency of models. Then, a dynamic memory unit is employed to maintain critical information along the network pipeline. Moreover, we design a dual interaction module to effectively facilitate interaction between local inductive bias and long-range information as well as among features at different layers. By adopting a decoupled learning way and fully exploiting complementarity across features, our method can achieve both high efficiency and accuracy. 
Extensive experiments on ImageNet-1K, COCO, and ADE20K datasets demonstrate the effectiveness of our approach, e.g., achieving $78.4/82.1\%$ top-1 accuracy on ImageNet-1K at the cost of only $0.7/1.9$ GMACs. Codes will be released on \href{..}{github}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16170v1-abstract-full').style.display = 'none'; document.getElementById('2411.16170v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16144">arXiv:2411.16144</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16144">pdf</a>, <a href="https://arxiv.org/format/2411.16144">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Using Drone Swarm to Stop Wildfire: A Predict-then-optimize Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Pan%2C+S">Shijie Pan</a>, <a href="/search/?searchtype=author&amp;query=Cheng%2C+A">Aoran Cheng</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+Y">Yiqi Sun</a>, <a href="/search/?searchtype=author&amp;query=Kang%2C+K">Kai Kang</a>, <a href="/search/?searchtype=author&amp;query=Pais%2C+C">Cristobal Pais</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yulun Zhou</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+Z+M">Zuo-Jun Max Shen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16144v1-abstract-short" style="display: inline;"> Drone swarms coupled with data intelligence can be the future of wildfire fighting. However, drone swarm firefighting faces enormous challenges, such as the highly complex environmental conditions in wildfire scenes, the highly dynamic nature of wildfire spread, and the significant computational complexity of drone swarm operations. We develop a predict-then-optimize approach to address these chal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16144v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16144v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16144v1-abstract-full" style="display: none;"> Drone swarms coupled with data intelligence can be the future of wildfire fighting. However, drone swarm firefighting faces enormous challenges, such as the highly complex environmental conditions in wildfire scenes, the highly dynamic nature of wildfire spread, and the significant computational complexity of drone swarm operations. We develop a predict-then-optimize approach to address these challenges and enable effective drone swarm firefighting. 
First, we construct wildfire spread prediction convex neural network (Convex-NN) models based on real wildfire data. Then, we propose a mixed-integer programming (MIP) model coupled with dynamic programming (DP) to enable efficient drone swarm task planning. We further use chance-constrained robust optimization (CCRO) to ensure robust firefighting performance under varying situations. The formulated model is solved efficiently using Benders Decomposition and Branch-and-Cut algorithms. After training on 75 simulated wildfire environments, the MIP+CCRO approach shows the best performance across several test sets, reducing movements by 37.3\% compared to the plain MIP. It also significantly outperformed the GA baseline, which often failed to fully extinguish the fire. In the next stage, we will conduct real-world fire spread and quenching experiments for further validation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16144v1-abstract-full').style.display = 'none'; document.getElementById('2411.16144v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16102">arXiv:2411.16102</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16102">pdf</a>, <a href="https://arxiv.org/format/2411.16102">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> BlendServe: Optimizing Offline Inference for Auto-regressive Large Models with Resource-aware Batching </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhao%2C+Y">Yilong Zhao</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+S">Shuo Yang</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+K">Kan Zhu</a>, <a href="/search/?searchtype=author&amp;query=Zheng%2C+L">Lianmin Zheng</a>, <a href="/search/?searchtype=author&amp;query=Kasikci%2C+B">Baris Kasikci</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yang Zhou</a>, <a href="/search/?searchtype=author&amp;query=Xing%2C+J">Jiarong Xing</a>, <a href="/search/?searchtype=author&amp;query=Stoica%2C+I">Ion Stoica</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16102v1-abstract-short" style="display: inline;"> Offline batch inference, which leverages the flexibility of request batching to achieve higher throughput and lower costs, is becoming more popular for latency-insensitive applications. Meanwhile, recent progress in model capability and modality makes requests more diverse in compute and memory demands, creating unique opportunities for throughput improvement by resource overlapping. 
However, a re&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16102v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16102v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16102v1-abstract-full" style="display: none;"> Offline batch inference, which leverages the flexibility of request batching to achieve higher throughput and lower costs, is becoming more popular for latency-insensitive applications. Meanwhile, recent progress in model capability and modality makes requests more diverse in compute and memory demands, creating unique opportunities for throughput improvement by resource overlapping. However, a request schedule that maximizes resource overlapping can conflict with the schedule that maximizes prefix sharing, a widely-used performance optimization, causing sub-optimal inference throughput. We present BlendServe, a system that maximizes resource utilization of offline batch inference by combining the benefits of resource overlapping and prefix sharing using a resource-aware prefix tree. BlendServe exploits the relaxed latency requirements in offline batch inference to reorder and overlap requests with varied resource demands while ensuring high prefix sharing. We evaluate BlendServe on a variety of synthetic multi-modal workloads and show that it provides up to $1.44\times$ throughput boost compared to widely-used industry standards, vLLM and SGLang. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16102v1-abstract-full').style.display = 'none'; document.getElementById('2411.16102v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16084">arXiv:2411.16084</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16084">pdf</a>, <a href="https://arxiv.org/format/2411.16084">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Genomics">q-bio.GN</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Deciphering genomic codes using advanced NLP techniques: a scoping review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Cheng%2C+S">Shuyan Cheng</a>, <a href="/search/?searchtype=author&amp;query=Wei%2C+Y">Yishu Wei</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yiliang Zhou</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+Z">Zihan Xu</a>, <a href="/search/?searchtype=author&amp;query=Wright%2C+D+N">Drew N Wright</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+J">Jinze Liu</a>, <a href="/search/?searchtype=author&amp;query=Peng%2C+Y">Yifan Peng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16084v1-abstract-short" style="display: inline;"> Objectives: The vast and complex nature of human genomic sequencing data presents challenges for effective analysis. This review aims to investigate the application of Natural Language Processing (NLP) techniques, particularly Large Language Models (LLMs) and transformer architectures, in deciphering genomic codes, focusing on tokenization, transformer models, and regulatory annotation prediction.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16084v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16084v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16084v1-abstract-full" style="display: none;"> Objectives: The vast and complex nature of human genomic sequencing data presents challenges for effective analysis. This review aims to investigate the application of Natural Language Processing (NLP) techniques, particularly Large Language Models (LLMs) and transformer architectures, in deciphering genomic codes, focusing on tokenization, transformer models, and regulatory annotation prediction. The goal of this review is to assess data and model accessibility in the most recent literature, gaining a better understanding of the existing capabilities and constraints of these tools in processing genomic sequencing data. Methods: Following Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) guidelines, our scoping review was conducted across PubMed, Medline, Scopus, Web of Science, Embase, and ACM Digital Library. Studies were included if they focused on NLP methodologies applied to genomic sequencing data analysis, without restrictions on publication date or article type. Results: A total of 26 studies published between 2021 and April 2024 were selected for review. 
The review highlights that tokenization and transformer models enhance the processing and understanding of genomic data, with applications in predicting regulatory annotations like transcription-factor binding sites and chromatin accessibility. Discussion: The application of NLP and LLMs to genomic sequencing data interpretation is a promising field that can help streamline the processing of large-scale genomic data while also providing a better understanding of its complex structures. It has the potential to drive advancements in personalized medicine by offering more efficient and scalable solutions for genomic analysis. Further research is also needed to discuss and overcome current limitations, enhancing model transparency and applicability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16084v1-abstract-full').style.display = 'none'; document.getElementById('2411.16084v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15993">arXiv:2411.15993</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15993">pdf</a>, <a href="https://arxiv.org/format/2411.15993">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Investigating Factuality in Long-Form Text Generation: The Roles of Self-Known and Self-Unknown </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Tu%2C+L">Lifu Tu</a>, <a href="/search/?searchtype=author&amp;query=Meng%2C+R">Rui Meng</a>, <a href="/search/?searchtype=author&amp;query=Joty%2C+S">Shafiq Joty</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yingbo Zhou</a>, <a href="/search/?searchtype=author&amp;query=Yavuz%2C+S">Semih Yavuz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15993v1-abstract-short" style="display: inline;"> Large language models (LLMs) have demonstrated strong capabilities in text understanding and generation. However, they often lack factuality, producing a mixture of true and false information, especially in long-form generation. In this work, we investigates the factuality of long-form text generation across various large language models (LLMs), including GPT-4, Gemini-1.5-Pro, Claude-3-Opus, Llam&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15993v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15993v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15993v1-abstract-full" style="display: none;"> Large language models (LLMs) have demonstrated strong capabilities in text understanding and generation. However, they often lack factuality, producing a mixture of true and false information, especially in long-form generation. 
In this work, we investigate the factuality of long-form text generation across various large language models (LLMs), including GPT-4, Gemini-1.5-Pro, Claude-3-Opus, Llama-3-70B, and Mistral. Our analysis reveals that factuality scores tend to decline in later sentences of the generated text, accompanied by a rise in the number of unsupported claims. Furthermore, we explore the effectiveness of different evaluation settings to assess whether LLMs can accurately judge the correctness of their own outputs: Self-Known (the percentage of supported atomic claims, decomposed from LLM outputs, that the corresponding LLMs judge as correct) and Self-Unknown (the percentage of unsupported atomic claims that the corresponding LLMs judge as incorrect). The results indicate that even advanced models like GPT-4 and Gemini-1.5-Pro fail to achieve perfect Self-Known scores, while their Self-Unknown scores remain notably above zero, reflecting ongoing uncertainty in their self-assessments. Moreover, we find a correlation between higher Self-Known scores and improved factuality, while higher Self-Unknown scores are associated with lower factuality. Interestingly, even without significant changes in the models&#39; self-judgment (Self-Known and Self-Unknown), the number of unsupported claims can increase, likely as an artifact of long-form generation. These findings show the limitations of current LLMs in long-form generation, and provide valuable insights for improving factuality in long-form text generation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15993v1-abstract-full').style.display = 'none'; document.getElementById('2411.15993v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15752">arXiv:2411.15752</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15752">pdf</a>, <a href="https://arxiv.org/format/2411.15752">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Measurement of cross sections of $e^+e^-\to K^0_S K^0_S ψ(3686)$ from $\sqrt{s}=$ 4.682 to 4.951 GeV </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=An%2C+Q">Q. An</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. 
Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a> , et al. (642 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15752v1-abstract-short" style="display: inline;"> The process $e^+e^-\to K^0_S K^0_S ψ(3686)$ is studied by analyzing $e^+e^-$ collision data samples collected at eight center-of-mass energies ranging from 4.682 to 4.951 GeV with the BESIII detector operating at the BEPCII collider, corresponding to an integrated luminosity of $4.1~{\rm fb}^{-1}$. The $e^+e^-\to K^0_S K^0_S ψ(3686)$ process is observed for the first time with a statis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15752v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15752v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15752v1-abstract-full" style="display: none;"> The process $e^+e^-\to K^0_S K^0_S ψ(3686)$ is studied by analyzing $e^+e^-$ collision data samples collected at eight center-of-mass energies ranging from 4.682 to 4.951 GeV with the BESIII detector operating at the BEPCII collider, corresponding to an integrated luminosity of $4.1~{\rm fb}^{-1}$. The $e^+e^-\to K^0_S K^0_S ψ(3686)$ process is observed for the first time with a statistical significance of $6.3σ$, and the cross sections at each center-of-mass energy are measured. The ratio of cross sections of $e^+e^-\to K_S^0 K_S^0 ψ(3686)$ relative to $e^+e^-\to K^+ K^- ψ(3686)$ is determined to be $\frac{σ(e^+e^-\to K_S^0 K_S^0 ψ(3686))}{σ(e^+e^-\to K^+ K^- ψ(3686))}=0.45 \pm 0.25$, which is consistent with the prediction based on isospin symmetry. The uncertainty includes both statistical and systematic contributions. Additionally, the $K_S^0ψ(3686)$ invariant mass distribution is found to be consistent with three-body phase space. The significance of a contribution beyond three-body phase space is only $0.8σ$. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15752v1-abstract-full').style.display = 'none'; document.getElementById('2411.15752v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15739">arXiv:2411.15739</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15739">pdf</a>, <a href="https://arxiv.org/format/2411.15739">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Astrophysical Phenomena">astro-ph.HE</span> </div> </div> <p class="title is-5 mathjax"> A 44-minute periodic radio transient in a supernova remnant </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Li%2C+D">Di Li</a>, <a href="/search/?searchtype=author&amp;query=Yuan%2C+M">Mao Yuan</a>, <a href="/search/?searchtype=author&amp;query=Wu%2C+L">Lin Wu</a>, <a href="/search/?searchtype=author&amp;query=Yan%2C+J">Jingye Yan</a>, <a href="/search/?searchtype=author&amp;query=Lv%2C+X">Xuning Lv</a>, <a href="/search/?searchtype=author&amp;query=Tsai%2C+C">Chao-Wei Tsai</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+P">Pei Wang</a>, <a href="/search/?searchtype=author&amp;query=Zhu%2C+W">WeiWei Zhu</a>, <a href="/search/?searchtype=author&amp;query=Deng%2C+L">Li Deng</a>, <a href="/search/?searchtype=author&amp;query=Lan%2C+A">Ailan Lan</a>, <a href="/search/?searchtype=author&amp;query=Xu%2C+R">Renxin Xu</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xianglei Chen</a>, <a href="/search/?searchtype=author&amp;query=Meng%2C+L">Lingqi Meng</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+J">Jian Li</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+X">Xiangdong Li</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+P">Ping Zhou</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+H">Haoran Yang</a>, <a href="/search/?searchtype=author&amp;query=Xue%2C+M">Mengyao Xue</a>, <a href="/search/?searchtype=author&amp;query=Lu%2C+J">Jiguang Lu</a>, <a href="/search/?searchtype=author&amp;query=Miao%2C+C">Chenchen Miao</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+W">Weiyang Wang</a>, <a href="/search/?searchtype=author&amp;query=Niu%2C+J">Jiarui Niu</a>, <a href="/search/?searchtype=author&amp;query=Fang%2C+Z">Ziyao Fang</a>, <a href="/search/?searchtype=author&amp;query=Fu%2C+Q">Qiuyang Fu</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+Y">Yi Feng</a> , et al. (23 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15739v1-abstract-short" style="display: inline;"> Long-period radio transients (LPTs) are a newly discovered class of radio emitters with yet incomprehensibly long rotation periods, ranging from minutes to hours. The astrophysical nature of their isolated counterparts remains undetermined. We report a new LPT, DART J1832-0911 (2656.23 $\pm$ 0.15 s period), the first evidence associating such objects to supernova remnants (SNRs). 
Its dispersion me&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15739v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15739v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15739v1-abstract-full" style="display: none;"> Long-period radio transients (LPTs) are a newly discovered class of radio emitters with yet incomprehensibly long rotation periods, ranging from minutes to hours. The astrophysical nature of their isolated counterparts remains undetermined. We report a new LPT, DART J1832-0911 (2656.23 $\pm$ 0.15 s period), the first evidence associating such objects to supernova remnants (SNRs). Its dispersion measure distance aligns well with the distance of the SNR, confirming its origin from a supernova explosion. The source displays either phase-locked circularly polarized emission or nearly 100% linear polarization in radio bands. No detectable optical counterpart was found, even with a 10 m class telescope. The J1832-0911&#39;s SNR association, stable, highly polarized emission, and abnormally long period strongly favor its origin from a young neutron star, whose spin has been braked, possibly by interaction with supernova&#39;s fallback materials. This discovery provides critical insights into the nature of ultra-long period transients and their evolutionary link to stellar remnants. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15739v1-abstract-full').style.display = 'none'; document.getElementById('2411.15739v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">36 pages, 12 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15571">arXiv:2411.15571</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15571">pdf</a>, <a href="https://arxiv.org/format/2411.15571">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> </div> </div> <p class="title is-5 mathjax"> Dephasing-assisted diffusive dynamics in superconducting quantum circuits </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liang%2C+Y">Yongqi Liang</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+C">Changrong Xie</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+Z">Zechen Guo</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+P">Peisheng Huang</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+W">Wenhui Huang</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Yiting Liu</a>, <a href="/search/?searchtype=author&amp;query=Qiu%2C+J">Jiawei Qiu</a>, <a href="/search/?searchtype=author&amp;query=Sun%2C+X">Xuandong Sun</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Z">Zilin Wang</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+X">Xiaohan Yang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jiawei Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+J">Jiajian Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Libo Zhang</a>, <a href="/search/?searchtype=author&amp;query=Chu%2C+J">Ji Chu</a>, <a href="/search/?searchtype=author&amp;query=Guo%2C+W">Weijie Guo</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+J">Ji Jiang</a>, <a href="/search/?searchtype=author&amp;query=Linpeng%2C+X">Xiayu Linpeng</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+S">Song Liu</a>, <a href="/search/?searchtype=author&amp;query=Niu%2C+J">Jingjing Niu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuxuan Zhou</a>, <a href="/search/?searchtype=author&amp;query=Ren%2C+W">Wenhui Ren</a>, <a href="/search/?searchtype=author&amp;query=Tao%2C+Z">Ziyu Tao</a>, <a href="/search/?searchtype=author&amp;query=Zhong%2C+Y">Youpeng Zhong</a>, <a href="/search/?searchtype=author&amp;query=Yu%2C+D">Dapeng Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15571v1-abstract-short" style="display: inline;"> Random fluctuations caused by environmental noise can lead to decoherence in quantum systems. Exploring and controlling such dissipative processes is both fundamentally intriguing and essential for harnessing quantum systems to achieve practical advantages and deeper insights. 
In this Letter, we first demonstrate the diffusive dynamics assisted by controlled dephasing noise in superconducting quan&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15571v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15571v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15571v1-abstract-full" style="display: none;"> Random fluctuations caused by environmental noise can lead to decoherence in quantum systems. Exploring and controlling such dissipative processes is both fundamentally intriguing and essential for harnessing quantum systems to achieve practical advantages and deeper insights. In this Letter, we first demonstrate the diffusive dynamics assisted by controlled dephasing noise in superconducting quantum circuits, contrasting with coherent evolution. We show that dephasing can enhance localization in a superconducting qubit array with quasiperiodic order, even in the regime where all eigenstates remain spatially extended for the coherent counterpart. Furthermore, by preparing different excitation distributions in the qubit array, we observe that a more localized initial state relaxes to a uniformly distributed mixed state faster with dephasing noise, illustrating another counterintuitive phenomenon called Mpemba effect, i.e., a far-from-equilibrium state can relax toward the equilibrium faster. These results deepen our understanding of diffusive dynamics at the microscopic level, and demonstrate controlled dissipative processes as a valuable tool for investigating Markovian open quantum systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15571v1-abstract-full').style.display = 'none'; document.getElementById('2411.15571v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7+10 pages, 4+9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15473">arXiv:2411.15473</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15473">pdf</a>, <a href="https://arxiv.org/ps/2411.15473">ps</a>, <a href="https://arxiv.org/format/2411.15473">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Representation Theory">math.RT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Category Theory">math.CT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Rings and Algebras">math.RA</span> </div> </div> <p class="title is-5 mathjax"> Tilting theory for extended module categories </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yu Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15473v1-abstract-short" style="display: inline;"> In extended hearts of bounded $t$-structures on a triangulated category, we provide a Happel-Reiten-Smalo tilting theorem and a characterization for $s$-torsion pairs. Applying these to $m$-extended module categories, we characterize torsion pairs induced by $(m+1)$-term silting complexes. After establishing Auslander-Reiten theory in extended module categories, we introduce $蟿_{[m]}$-tilting pair&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15473v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15473v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15473v1-abstract-full" style="display: none;"> In extended hearts of bounded $t$-structures on a triangulated category, we provide a Happel-Reiten-Smalo tilting theorem and a characterization for $s$-torsion pairs. Applying these to $m$-extended module categories, we characterize torsion pairs induced by $(m+1)$-term silting complexes. After establishing Auslander-Reiten theory in extended module categories, we introduce $蟿_{[m]}$-tilting pairs and show bijections between $蟿_{[m]}$-tilting pairs, $(m+1)$-term silting complexes, and functorially finite $s$-torsion pairs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15473v1-abstract-full').style.display = 'none'; document.getElementById('2411.15473v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">28 pages, comments welcome</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15441">arXiv:2411.15441</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.15441">pdf</a>, <a href="https://arxiv.org/format/2411.15441">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Study of $\it螞_{\it{b}}^\rm{0}$ and $\it螢_{\it{b}}^\rm{0}$ decays to $\it螞 h^+h^{&#39;-}$ and evidence for $CP$ violation in $\it螞_{\it{b}}^\rm{0}\to\it螞 K^+K^-$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1129 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15441v1-abstract-short" style="display: inline;"> A study of $\it螞_{\it{b}}^\rm{0}$ and $\it螢_{\it{b}}^\rm{0}$ decays to $\it螞 h^{+} h^{\prime -}$ $(h^{(\prime)}=蟺, K)$ is performed using $pp$ collision data collected by the LHCb experiment during LHC Runs 1$-$2, corresponding to an integrated luminosity of $9~\rm{fb}^{-1}$. 
The branching fractions for these decays are measured using the $\itΛ_{\it{b}}^\rm{0}\to\itΛ_{\it{c}}^+(\to\itΛπ^+)π^-$ dec&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15441v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15441v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15441v1-abstract-full" style="display: none;"> A study of $\itΛ_{\it{b}}^\rm{0}$ and $\itΞ_{\it{b}}^\rm{0}$ decays to $\itΛ h^{+} h^{\prime -}$ $(h^{(\prime)}=π, K)$ is performed using $pp$ collision data collected by the LHCb experiment during LHC Runs 1$-$2, corresponding to an integrated luminosity of $9~\rm{fb}^{-1}$. The branching fractions for these decays are measured using the $\itΛ_{\it{b}}^\rm{0}\to\itΛ_{\it{c}}^+(\to\itΛπ^+)π^-$ decay as a control channel. The decays $\itΛ_{\it{b}}^\rm{0}\to\itΛπ^+π^-$ and $\itΞ_{\it{b}}^\rm{0}\to\itΛK^-π^+$ are observed for the first time. For decay modes with sufficient signal yields, $CP$ asymmetries are measured in the full and localized regions of the final-state phase space. Evidence is found for $CP$ violation in the $\itΛ_{\it{b}}^\rm{0}\to\itΛK^+K^-$ decay, interpreted as originating primarily from an asymmetric $\itΛ_{\it{b}}^\rm{0} \to \it{N}^{*+} \it{K}^-$ decay amplitude. The measured $CP$ asymmetries for the other decays are compatible with zero. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15441v1-abstract-full').style.display = 'none'; document.getElementById('2411.15441v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with any supplementary material and additional information, are available at https://cern.ch/lhcbproject/Publications/p/LHCb-PAPER-2024-043.html (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-043, CERN-EP-2024-281 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14893">arXiv:2411.14893</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14893">pdf</a>, <a href="https://arxiv.org/format/2411.14893">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="General Relativity and Quantum Cosmology">gr-qc</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> </div> </div> <p class="title is-5 mathjax"> Rapid eccentric spin-aligned binary black hole waveform generation based on deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Shi%2C+R">Ruijun Shi</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+T">Tianyu Zhao</a>, <a href="/search/?searchtype=author&amp;query=Ren%2C+Z">Zhixiang Ren</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+Z">Zhoujian Cao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14893v1-abstract-short" style="display: inline;"> Accurate waveform templates of binary black holes (BBHs) with eccentric orbits are essential for the detection and precise parameter estimation of gravitational waves (GWs). While SEOBNRE produces accurate time-domain waveforms for eccentric BBH systems, its generation speed remains a critical bottleneck in analyzing such systems. Accelerating template generation is crucial to data analysis improv&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14893v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14893v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14893v1-abstract-full" style="display: none;"> Accurate waveform templates of binary black holes (BBHs) with eccentric orbits are essential for the detection and precise parameter estimation of gravitational waves (GWs). While SEOBNRE produces accurate time-domain waveforms for eccentric BBH systems, its generation speed remains a critical bottleneck in analyzing such systems. Accelerating template generation is crucial to data analysis improvement and valuable information extraction from observational data. We present SEOBNRE_AIq5e2, an innovative AI-based surrogate model that crafted to accelerate waveform generation for eccentric, spin-aligned BBH systems. SEOBNRE_AIq5e2 incorporates an advanced adaptive resampling technique during training, enabling the generation of eccentric BBH waveforms with mass ratios up to 5, eccentricities below 0.2, and spins $|蠂_z|$ up to 0.6. 

arXiv:2411.14893 [pdf, other] (https://arxiv.org/abs/2411.14893)
Subjects: General Relativity and Quantum Cosmology (gr-qc); Instrumentation and Methods for Astrophysics (astro-ph.IM)
Title: Rapid eccentric spin-aligned binary black hole waveform generation based on deep learning
Authors: Ruijun Shi, Yue Zhou, Tianyu Zhao, Zhixiang Ren, Zhoujian Cao
Abstract: Accurate waveform templates of binary black holes (BBHs) with eccentric orbits are essential for the detection and precise parameter estimation of gravitational waves (GWs). While SEOBNRE produces accurate time-domain waveforms for eccentric BBH systems, its generation speed remains a critical bottleneck in analyzing such systems. Accelerating template generation is crucial for improving data analysis and extracting valuable information from observational data. We present SEOBNRE_AIq5e2, an innovative AI-based surrogate model crafted to accelerate waveform generation for eccentric, spin-aligned BBH systems. SEOBNRE_AIq5e2 incorporates an advanced adaptive resampling technique during training, enabling the generation of eccentric BBH waveforms with mass ratios up to 5, eccentricities below 0.2, and spins $|\chi_z|$ up to 0.6. It achieves an impressive generation speed of 4.3 ms per waveform with a mean mismatch of $1.02 \times 10^{-3}$. With this exceptional accuracy and rapid performance, SEOBNRE_AIq5e2 emerges as a promising waveform template for future analysis of eccentric gravitational-wave data.
Submitted 22 November, 2024; originally announced November 2024.
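
To make the quoted figure of merit concrete, here is a minimal, hedged sketch of how a mismatch between a surrogate and a reference waveform is commonly computed (flat noise spectrum, maximization over circular time shifts only); it illustrates the standard definition, not the authors' SEOBNRE_AIq5e2 evaluation pipeline.

```python
import numpy as np

def mismatch(h1, h2):
    """1 - (maximum normalised overlap over circular time shifts).

    Toy version: flat (white) noise spectrum, real equal-length series,
    maximisation over time shifts only (a sign flip stands in for phase).
    A real analysis would weight the inner product by the detector PSD.
    """
    corr = np.fft.irfft(np.fft.rfft(h1) * np.conj(np.fft.rfft(h2)), n=len(h1))
    norm = np.sqrt(np.sum(h1 ** 2) * np.sum(h2 ** 2))
    return 1.0 - np.max(np.abs(corr)) / norm

# Toy usage: two slightly detuned chirp-like signals sampled at 4096 Hz.
t = np.arange(0, 4.0, 1.0 / 4096)
h_ref = np.sin(2 * np.pi * (30 * t + 10.00 * t ** 2))
h_sur = np.sin(2 * np.pi * (30 * t + 10.02 * t ** 2))
print(f"toy mismatch = {mismatch(h_ref, h_sur):.3e}")
```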

arXiv:2411.14853 [pdf] (https://arxiv.org/abs/2411.14853)
Subjects: Physics Education (physics.ed-ph)
Title: Enhancing Undergraduate Physics Education: A Pedagogical Exploration of the Wheatstone Bridge with Symmetry and Simulation
Authors: Yong Zhou, Ze-yan Peng, Yan Xiao, Wen-mei Guo, Guan-xin Yao
Abstract: The Wheatstone bridge experiment is a fundamental component of the undergraduate physics curriculum, traditionally taught as a hands-on activity to explore resistance measurement and circuit balancing. This study introduces a pedagogical approach that combines qualitative symmetry-based analysis with quantitative computer-based sensitivity simulations, enhancing students' understanding and engagement. A key focus is demonstrating how balanced configurations in the Wheatstone bridge affect circuit sensitivity, providing students with an intuitive grasp of the relationship between symmetry and measurement accuracy. Using simulations, we investigate the impact of internal resistances in the galvanometer and power supply, showing that lower resistances increase the circuit's sensitivity to minor resistance changes. Additionally, various resistor configurations are analyzed, highlighting the effect of careful adjustments in achieving maximal sensitivity. A Bayesian optimization-based software tool was developed to guide students in selecting optimal component values, thereby minimizing the need for manual adjustments while ensuring accurate measurements. This integrated approach bridges traditional experimentation with computational techniques, fostering critical thinking and preparing students for modern scientific practices. The study demonstrates that this enriched methodology significantly enhances the learning experience, equipping students with valuable skills in both experimental and computational aspects of physics.
Submitted 22 November, 2024; originally announced November 2024.
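
As an illustration of the kind of sensitivity calculation the abstract describes, the following is a minimal sketch (not the authors' tool): it computes the galvanometer current of a Wheatstone bridge from the Thevenin equivalent seen by the detector, assuming an ideal voltage source and treating the galvanometer resistance Rg as a parameter.

```python
def galvanometer_current(V, R1, R2, R3, R4, Rg):
    """Galvanometer current of a Wheatstone bridge, assuming an ideal source.

    Topology: source V across nodes A-C; arms A-B = R1, B-C = R2,
    A-D = R3, D-C = R4; galvanometer (resistance Rg) between B and D.
    """
    v_th = V * (R2 / (R1 + R2) - R4 / (R3 + R4))       # open-circuit voltage B-D
    r_th = R1 * R2 / (R1 + R2) + R3 * R4 / (R3 + R4)   # Thevenin resistance seen by Rg
    return v_th / (r_th + Rg)

# Detector response to a 0.1% imbalance in one arm, for two galvanometer resistances:
V, R = 6.0, 1000.0
for Rg in (50.0, 500.0):
    dI = galvanometer_current(V, R, R, R, 1.001 * R, Rg)
    print(f"Rg = {Rg:5.0f} ohm -> current for 0.1% imbalance: {dI:.2e} A")
```

Consistent with the abstract's observation, the smaller detector resistance yields the larger deflection current for the same imbalance.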

arXiv:2411.14796 [pdf, other] (https://arxiv.org/abs/2411.14796)
Subjects: Computer Vision and Pattern Recognition (cs.CV); Machine Learning (cs.LG)
Title: Adaptive Hyper-Graph Convolution Network for Skeleton-based Human Action Recognition with Virtual Connections
Authors: Youwei Zhou, Tianyang Xu, Cong Wu, Xiaojun Wu, Josef Kittler
Abstract: The shared topology of human skeletons motivated the recent investigation of graph convolutional network (GCN) solutions for action recognition. However, the existing GCNs rely on the binary connection of two neighbouring vertices (joints) formed by an edge (bone), overlooking the potential of constructing multi-vertex convolution structures. In this paper we address this oversight and explore the merits of a hyper-graph convolutional network (Hyper-GCN) to achieve the aggregation of rich semantic information conveyed by skeleton vertices. In particular, our Hyper-GCN adaptively optimises multi-scale hyper-graphs during training, revealing the action-driven multi-vertex relations. Besides, virtual connections are often designed to support efficient feature aggregation, implicitly extending the spectrum of dependencies within the skeleton. By injecting virtual connections into hyper-graphs, the semantic clues of diverse action categories can be highlighted. The results of experiments conducted on the NTU-60, NTU-120, and NW-UCLA datasets demonstrate the merits of our Hyper-GCN compared to the state-of-the-art methods. Specifically, we outperform the existing solutions on NTU-120, achieving 90.2% and 91.4% top-1 recognition accuracy on X-Sub and X-Set.
Submitted 22 November, 2024; originally announced November 2024.
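
For readers unfamiliar with the building block, here is a minimal NumPy sketch of a generic hypergraph convolution step using the HGNN-style normalisation (D_v^{-1/2} H W D_e^{-1} H^T D_v^{-1/2} X Theta). It is an illustration of hypergraph convolution in general, under assumed toy inputs, not the adaptive multi-scale Hyper-GCN layer proposed in this paper.

```python
import numpy as np

def hypergraph_conv(X, H, w, Theta):
    """One generic hypergraph convolution step (HGNN-style normalisation).

    X:     (N, F)  vertex features (e.g. N skeleton joints)
    H:     (N, E)  incidence matrix, H[v, e] = 1 if vertex v lies in hyperedge e
    w:     (E,)    hyperedge weights
    Theta: (F, F2) learnable projection
    """
    W = np.diag(w)
    Dv = np.diag(1.0 / np.sqrt(H @ w))      # vertex degrees d(v) = sum_e w_e H[v, e]
    De = np.diag(1.0 / H.sum(axis=0))       # hyperedge degrees
    A = Dv @ H @ W @ De @ H.T @ Dv          # normalised vertex-to-vertex operator
    return np.maximum(A @ X @ Theta, 0.0)   # ReLU

# Toy usage: 5 joints, 2 hyperedges (one grouping joints 0-2, one grouping 2-4).
H = np.array([[1, 0], [1, 0], [1, 1], [0, 1], [0, 1]], dtype=float)
X = np.random.randn(5, 8)
out = hypergraph_conv(X, H, w=np.ones(2), Theta=np.random.randn(8, 4))
print(out.shape)  # (5, 4)
```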

arXiv:2411.14709 [pdf, other] (https://arxiv.org/abs/2411.14709)
Subjects: Earth and Planetary Astrophysics (astro-ph.EP)
DOI: 10.1093/mnrasl/slae109
Title: The Moon-forming Impact as a Constraint for the Inner Solar System's Formation
Authors: Tong Fang, Rongxi Bi, Hui Zhang, You Zhou, Christian Reinhardt, Hongping Deng
Abstract: The solar system planets are benchmarks for planet formation theory. Yet two paradigms coexist for the four terrestrial planets: prolonged collisional growth among planetesimals lasting $>100$ million years (Myr), and fast formation via planetesimals accreting pebbles within 10 Myr. Despite their dramatic difference, we can hardly tell which theory is more relevant to the true history of the terrestrial planets' formation. Here, we show that the Moon's origin puts stringent constraints on the pebble accretion scenario, rendering it less favourable. In the pebble accretion model, the one-off giant impact between proto-Earth and Theia rarely (probability $<$ 1‰) occurs at the right timing and configuration for Moon formation. Even if a potential impact happens by chance, giant impact simulations reveal perfect mixing between proto-Earth and Theia, leaving no room for the observed primordial Earth mantle heterogeneity and the compositional difference, though small, between Earth and the Moon. Thus, the Earth-Moon system, along with the other terrestrial planets, should preferably form from chaotic collisional growth in the inner solar system.
Submitted 21 November, 2024; originally announced November 2024.
Comments: 8 pages, 6 figures

arXiv:2411.14461 [pdf, other] (https://arxiv.org/abs/2411.14461)
Subjects: Computation and Language (cs.CL); Artificial Intelligence (cs.AI); Computers and Society (cs.CY)
Title: Towards Next-Generation Medical Agent: How o1 is Reshaping Decision-Making in Medical Scenarios
Authors: Shaochen Xu, Yifan Zhou, Zhengliang Liu, Zihao Wu, Tianyang Zhong, Huaqin Zhao, Yiwei Li, Hanqi Jiang, Yi Pan, Junhao Chen, Jin Lu, Wei Zhang, Tuo Zhang, Lu Zhang, Dajiang Zhu, Xiang Li, Wei Liu, Quanzheng Li, Andrea Sikora, Xiaoming Zhai, Zhen Xiang, Tianming Liu
Abstract: Artificial Intelligence (AI) has become essential in modern healthcare, with large language models (LLMs) offering promising advances in clinical decision-making. Traditional model-based approaches, including those leveraging in-context demonstrations and those with specialized medical fine-tuning, have demonstrated strong performance in medical language processing but struggle with real-time adaptability, multi-step reasoning, and handling complex medical tasks. Agent-based AI systems address these limitations by incorporating reasoning traces, tool selection based on context, knowledge retrieval, and both short- and long-term memory. These additional features enable the medical AI agent to handle complex medical scenarios where decision-making should be built on real-time interaction with the environment. Therefore, unlike conventional model-based approaches that treat medical queries as isolated questions, medical AI agents approach them as complex tasks and behave more like human doctors. In this paper, we study the choice of the backbone LLM for medical AI agents, which is the foundation for the agent's overall reasoning and action generation. In particular, we consider the emergent o1 model and examine its impact on agents' reasoning, tool-use adaptability, and real-time information retrieval across diverse clinical scenarios, including high-stakes settings such as intensive care units (ICUs). Our findings demonstrate o1's ability to enhance diagnostic accuracy and consistency, paving the way for smarter, more responsive AI tools that support better patient outcomes and decision-making efficacy in clinical practice.
Submitted 16 November, 2024; originally announced November 2024.

arXiv:2411.14415 [pdf, other] (https://arxiv.org/abs/2411.14415)
Subjects: Strongly Correlated Electrons (cond-mat.str-el); Materials Science (cond-mat.mtrl-sci)
Title: Ground-state magnetic structures of topological kagome metals RV$_6$Sn$_6$ (R = Tb, Dy, Ho, Er)
Authors: Yishui Zhou, Min-Kai Lee, Sabreen Hammouda, Sheetal Devi, Shin-Ichiro Yano, Romain Sibille, Oksana Zaharko, Wolfgang Schmidt, Karin Schmalzl, Ketty Beauvois, Eric Ressouche, Po-Chun Chang, Chun-Hao Huang, Lieh-Jeng Chang, Thomas Brückel, Yixi Su
Abstract: Magnetic kagome metals have attracted tremendous research interest recently, because they represent an ideal playground for exploring the fascinating interplay between their intrinsically inherited topologically non-trivial electron band structures, magnetism and electronic correlation effects, and the resultant novel electronic/magnetic states and emergent excitations. In this work, we report a comprehensive single-crystal neutron diffraction investigation of the ground-state magnetic structures of the recently discovered V-based topological kagome metals RV$_6$Sn$_6$ (R = Tb, Dy, Ho, Er). Furthermore, the sample synthesis details and our systematic studies of the crystal structure and the low-temperature magnetic and thermodynamic properties of these compounds via various in-house characterization techniques are also reported. We reveal that RV$_6$Sn$_6$ (R = Tb, Dy, Ho) have a collinear ferromagnetic order in the ground state, with the ordered magnetic moment aligned along the c axis for R = Tb, Ho, while tilted approximately 20° off the c axis for R = Dy. In contrast, ErV$_6$Sn$_6$ shows an A-type antiferromagnetic structure with a magnetic propagation vector k = (0, 0, 0.5) and with the ordered magnetic moment aligned in the ab plane. A detailed comparison of the low-temperature magnetic structures of the two extensively investigated topological kagome metal series, RV$_6$Sn$_6$ and RMn$_6$Sn$_6$, is given. This allows us to gain new insights into the complex magnetic interactions, diverse single-ion magnetic anisotropies and spin dynamics in these compounds. The reported ground-state magnetic structures in RV$_6$Sn$_6$ (R = Tb, Dy, Ho, Er) pave the way for further explorations of the possible interplay between magnetism and topologically non-trivial electron band structures in the magnetically ordered phase regime.
Submitted 21 November, 2024; originally announced November 2024.
Comments: To appear in Phys. Rev. Research

arXiv:2411.14384 [pdf, other] (https://arxiv.org/abs/2411.14384)
Subjects: Computer Vision and Pattern Recognition (cs.CV); Graphics (cs.GR)
Title: Baking Gaussian Splatting into Diffusion Denoiser for Fast and Scalable Single-stage Image-to-3D Generation
Authors: Yuanhao Cai, He Zhang, Kai Zhang, Yixun Liang, Mengwei Ren, Fujun Luan, Qing Liu, Soo Ye Kim, Jianming Zhang, Zhifei Zhang, Yuqian Zhou, Zhe Lin, Alan Yuille
Abstract: Existing feed-forward image-to-3D methods mainly rely on 2D multi-view diffusion models that cannot guarantee 3D consistency. These methods easily collapse when changing the prompt view direction and mainly handle object-centric prompt images. In this paper, we propose a novel single-stage 3D diffusion model, DiffusionGS, for object and scene generation from a single view. DiffusionGS directly outputs 3D Gaussian point clouds at each timestep to enforce view consistency, allowing the model to generate robustly given prompt views of any direction, beyond object-centric inputs. In addition, to improve the capability and generalization ability of DiffusionGS, we scale up the 3D training data by developing a scene-object mixed training strategy. Experiments show that our method enjoys better generation quality (2.20 dB higher PSNR and 23.25 lower FID) and over 5x faster speed (~6 s on an A100 GPU) than SOTA methods. The user study and text-to-3D applications also reveal the practical value of our method. Our project page at https://caiyuanhao1998.github.io/project/DiffusionGS/ shows the video and interactive generation results.
Submitted 25 November, 2024; v1 submitted 21 November, 2024; originally announced November 2024.
Comments: A novel one-stage 3DGS-based diffusion generates objects and scenes from a single view in ~6 seconds

arXiv:2411.14355 [pdf, other] (https://arxiv.org/abs/2411.14355)
Subjects: Nuclear Experiment (nucl-ex)
Title: Measurement of two-neutrino double electron capture half-life of $^{124}$Xe with PandaX-4T
Authors: PandaX Collaboration: Zihao Bo, Wei Chen, Xun Chen, Yunhua Chen, Zhaokan Cheng, Xiangyi Cui, Yingjie Fan, Deqing Fang, Zhixing Gao, Lisheng Geng, Karl Giboni, Xunan Guo, Xuyuan Guo, Zichao Guo, Chencheng Han, Ke Han, Changda He, Jinrong He, Di Huang, Houqi Huang, Junting Huang, Ruquan Hou, Yu Hou, Xiangdong Ji, et al. (77 additional authors not shown)
Abstract: Detailed studies of two-neutrino double electron capture (2$\nu$DEC) are a crucial step towards searching for the neutrinoless mode to explore the Majorana nature of neutrinos. We have measured precisely the half-life of the 2$\nu$DEC process in $^{124}$Xe, utilizing a total exposure of 1.73 tonne$\cdot$year from the commissioning run and the first science run of the PandaX-4T experiment. A time-dependent background model in the $\mathcal{O}$(10 keV) energy region is constructed for the first time in PandaX-4T data. With an unbinned maximum likelihood fit, we determine the half-life of the 2$\nu$DEC process to be $(1.03\pm0.15_{\rm stat}\pm0.06_{\rm sys})\times 10^{22}$ yr. Furthermore, we have evaluated the branching ratio for both electrons captured from the $K$ shell ($KK$) to be $(65\pm5)\%$, which aligns with $^{124}$Xe nuclear model calculations within 1.5$\sigma$.
Submitted 21 November, 2024; originally announced November 2024.
Comments: 18 pages, 5 figures, 3 tables
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 5 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14112">arXiv:2411.14112</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14112">pdf</a>, <a href="https://arxiv.org/ps/2411.14112">ps</a>, <a href="https://arxiv.org/format/2411.14112">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Differential Geometry">math.DG</span> </div> </div> <p class="title is-5 mathjax"> Rigidity Results for Compact Submanifolds with Pinched Ricci Curvature in Euclidean and Spherical Space Forms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Ge%2C+J">Jianquan Ge</a>, <a href="/search/?searchtype=author&amp;query=Tao%2C+Y">Ya Tao</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yi Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14112v1-abstract-short" style="display: inline;"> For compact submanifolds in Euclidean and Spherical space forms with Ricci curvature bounded below by a function $伪(n,k,H,c)$ of mean curvature, we prove that the submanifold is either isometric to the Einstein Clifford torus, or a topological sphere for the maximal bound $伪(n,[\frac{n}{2}],H,c)$, or has up to $k$-th homology groups vanishing. This gives an almost complete (except for the differen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14112v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14112v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14112v1-abstract-full" style="display: none;"> For compact submanifolds in Euclidean and Spherical space forms with Ricci curvature bounded below by a function $伪(n,k,H,c)$ of mean curvature, we prove that the submanifold is either isometric to the Einstein Clifford torus, or a topological sphere for the maximal bound $伪(n,[\frac{n}{2}],H,c)$, or has up to $k$-th homology groups vanishing. This gives an almost complete (except for the differentiable sphere theorem) characterization of compact submanifolds with pinched Ricci curvature, generalizing celebrated rigidity results obtained by Ejiri, Xu-Tian, Xu-Gu, Xu-Leng-Gu, Vlachos, Dajczer-Vlachos. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14112v1-abstract-full').style.display = 'none'; document.getElementById('2411.14112v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, any comments are welcome</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 53C20; 53C24; 53C40 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13814">arXiv:2411.13814</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13814">pdf</a>, <a href="https://arxiv.org/format/2411.13814">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> AutoMixQ: Self-Adjusting Quantization for High Performance Memory-Efficient Fine-Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+C">Changhai Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+S">Shiyang Zhang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuhua Zhou</a>, <a href="/search/?searchtype=author&amp;query=Liu%2C+Z">Zekai Liu</a>, <a href="/search/?searchtype=author&amp;query=Weng%2C+S">Shichao Weng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13814v1-abstract-short" style="display: inline;"> Fine-tuning large language models (LLMs) under resource constraints is a significant challenge in deep learning. Low-Rank Adaptation (LoRA), pruning, and quantization are all effective methods for improving resource efficiency. However, combining them directly often results in suboptimal performance, especially with uniform quantization across all model layers. This is due to the complex, uneven i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13814v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13814v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13814v1-abstract-full" style="display: none;"> Fine-tuning large language models (LLMs) under resource constraints is a significant challenge in deep learning. Low-Rank Adaptation (LoRA), pruning, and quantization are all effective methods for improving resource efficiency. However, combining them directly often results in suboptimal performance, especially with uniform quantization across all model layers. This is due to the complex, uneven interlayer relationships introduced by pruning, necessitating more refined quantization strategies. To address this, we propose AutoMixQ, an end-to-end optimization framework that selects optimal quantization configurations for each LLM layer. AutoMixQ leverages lightweight performance models to guide the selection process, significantly reducing time and computational resources compared to exhaustive search methods. By incorporating Pareto optimality, AutoMixQ balances memory usage and performance, approaching the upper bounds of model capability under strict resource constraints. Our experiments on widely used benchmarks show that AutoMixQ reduces memory consumption while achieving superior performance. 

arXiv:2411.13740 [pdf, other] (https://arxiv.org/abs/2411.13740)
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI); Distributed, Parallel, and Cluster Computing (cs.DC); Networking and Internet Architecture (cs.NI)
Title: Federated Continual Learning for Edge-AI: A Comprehensive Survey
Authors: Zi Wang, Fei Wu, Feng Yu, Yurui Zhou, Jia Hu, Geyong Min
Abstract: Edge-AI, the convergence of edge computing and artificial intelligence (AI), has become a promising paradigm that enables the deployment of advanced AI models at the network edge, close to users. In Edge-AI, federated continual learning (FCL) has emerged as an imperative framework, which fuses knowledge from different clients while preserving data privacy and retaining knowledge from previous tasks as it learns new ones. By so doing, FCL aims to ensure stable and reliable performance of learning models in dynamic and distributed environments. In this survey, we thoroughly review the state-of-the-art research and present the first comprehensive survey of FCL for Edge-AI. We categorize FCL methods based on three task characteristics: federated class continual learning, federated domain continual learning, and federated task continual learning. For each category, an in-depth investigation and review of the representative methods are provided, covering background, challenges, problem formalisation, solutions, and limitations. Besides, existing real-world applications empowered by FCL are reviewed, indicating the current progress and potential of FCL in diverse application domains. Furthermore, we discuss and highlight several prospective research directions of FCL such as algorithm-hardware co-design for FCL and FCL with foundation models, which could provide insights into the future development and practical deployment of FCL in the era of Edge-AI.
Submitted 20 November, 2024; originally announced November 2024.

arXiv:2411.13456 [pdf] (https://arxiv.org/abs/2411.13456)
Subjects: Systems and Control (eess.SY)
Title: Why Anticipatory Sensing Matters in Commercial ACC Systems under Cut-In Scenarios: A Perspective from Stochastic Safety Analysis
Authors: Hao Zhang, Sixu Li, Zihao Li, Mohammad Anis, Dominique Lord, Yang Zhou
Abstract: This study presents an analytical solution for the vehicle state evolution of Adaptive Cruise Control (ACC) systems under cut-in scenarios, incorporating sensing delays and anticipation using the Lambert W function. The theoretical analysis demonstrates that the vehicle state evolution and the corresponding safety of ACC in cut-in situations are influenced by multiple factors, including the original leading vehicle's state, the initial conditions of the cut-in vehicle, subsequent cut-in maneuvers, sensing delays, and the ACC's anticipation capabilities. To quantitatively assess these influences, a series of numerical experiments were conducted to perform a stochastic safety analysis of ACC systems, accounting for embedded sensing delays and anticipation, using empirically calibrated control parameters from real-world data. The experiments revealed that the impact of sensing delays on ACC is multifaceted. Specifically, sensing delays negatively affect ACC stability, with the severity increasing as the delay lengthens. Furthermore, collision risk in cut-in scenarios becomes more significant with sensing delays, particularly when the cut-in vehicle is slower than the following vehicle and when cut-ins are aggressive. However, anticipation plays a crucial role in mitigating these risks. Even with a 0.6-second anticipation, collision risk can be reduced by 91% in highly adverse scenarios. Finally, both sensing delays and anticipation have effects that intensify with their duration. An anticipation period of 2 seconds effectively ensures safety in aggressive cut-in conditions, even in the presence of sensing delays.
Submitted 20 November, 2024; originally announced November 2024.
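
To give a flavour of how the Lambert W function enters delay analysis, consider the scalar delayed feedback model x'(t) = -k x(t - tau): substituting x = exp(s t) gives s = -k exp(-s tau), equivalently (s tau) exp(s tau) = -k tau, so the characteristic roots are s = W(-k tau)/tau over the branches of W, with the principal branch giving the rightmost root. The sketch below is that generic textbook calculation under assumed parameters, not the paper's analytical solution for the full ACC model.

```python
from scipy.special import lambertw

def rightmost_root(k, tau):
    """Rightmost characteristic root of the delayed feedback x'(t) = -k x(t - tau).

    Roots satisfy s = -k exp(-s tau), i.e. (s tau) exp(s tau) = -k tau,
    so s = W(-k tau)/tau; the principal branch (branch 0) gives the rightmost root.
    """
    return lambertw(-k * tau, 0) / tau

# Toy feedback gain k = 0.8 1/s; compare a short and a long sensing delay.
for tau in (0.2, 2.5):
    s = rightmost_root(0.8, tau)
    verdict = "stable" if s.real < 0 else "unstable"
    print(f"tau = {tau:.1f} s -> Re(s_max) = {s.real:+.3f} 1/s ({verdict})")
```

For this toy model the stability boundary sits at k*tau = pi/2, so the longer delay destabilises the loop, which is the qualitative effect of sensing delay discussed in the abstract.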

arXiv:2411.13293 [pdf, ps, other] (https://arxiv.org/abs/2411.13293)
Subjects: Theoretical Economics (econ.TH); Econometrics (econ.EM); Optimization and Control (math.OC)
Title: Revealed Information
Authors: Laura Doval, Ran Eilat, Tianhao Liu, Yangfan Zhou
Abstract: An analyst observes the frequency with which a decision maker (DM) takes actions, but does not observe the frequency of actions conditional on the payoff-relevant state. We ask when the analyst can rationalize the DM's choices as if the DM first learns something about the state before taking action. We provide a support-function characterization of the triples of utility functions, prior beliefs, and (marginal) distributions over actions such that the DM's action distribution is consistent with information, given the agent's prior and utility function. Assumptions on the cardinality of the state space and the utility function allow us to refine this characterization, obtaining a sharp system of finitely many inequalities that the utility function, prior, and action distribution must satisfy. We apply our characterization to study comparative statics and ring-network games, and to identify conditions under which a data set is consistent with a public information structure in first-order Bayesian persuasion games. We characterize the set of distributions over posterior beliefs that are consistent with the DM's choices. Assuming the first-order approach applies, we extend our results to settings with a continuum of actions and/or states.
Submitted 20 November, 2024; originally announced November 2024.

arXiv:2411.12905 [pdf] (https://arxiv.org/abs/2411.12905)
Subjects: Optics (physics.optics); Materials Science (cond-mat.mtrl-sci)
Title: Nonlinear optics in 2D materials: from classical to quantum
Authors: Liuxin Gu, You Zhou
Abstract: Nonlinear optics has long been a cornerstone of modern photonic technology, enabling a wide array of applications, from frequency conversion to the generation of ultrafast light pulses. Recent breakthroughs in two-dimensional (2D) materials have opened a frontier in this field, offering new opportunities for both classical and quantum nonlinear optics. These atomically thin materials exhibit strong light-matter interactions and large nonlinear responses, thanks to their tunable lattice symmetries, strong resonance effects, and highly engineerable band structures. In this paper, we explore the potential that 2D materials bring to nonlinear optics, covering topics from classical nonlinear optics to nonlinearities at the few-photon level. We delve into how these materials enable new possibilities, such as symmetry control, phase matching, and integration into photonic circuits. The fusion of 2D materials with nonlinear optics provides insights into the fundamental behaviors of elementary excitations such as electrons, excitons, and photons in low-dimensional systems, and has the potential to transform the landscape of next-generation photonic and quantum technologies.
Submitted 19 November, 2024; originally announced November 2024.

arXiv:2411.12815 [pdf, other] (https://arxiv.org/abs/2411.12815)
Subjects: High Energy Astrophysical Phenomena (astro-ph.HE); Earth and Planetary Astrophysics (astro-ph.EP); Solar and Stellar Astrophysics (astro-ph.SR); Plasma Physics (physics.plasm-ph); Space Physics (physics.space-ph)
Title: Multi-Mission Observations of Relativistic Electrons and High-Speed Jets Linked to Shock Generated Transients
Authors: Savvas Raptis, Martin Lindberg, Terry Z. Liu, Drew L. Turner, Ahmad Lalti, Yufei Zhou, Primož Kajdič, Athanasios Kouloumvakos, David G. Sibeck, Laura Vuorinen, Adam Michael, Mykhaylo Shumko, Adnane Osmane, Eva Krämer, Lucile Turc, Tomas Karlsson, Christos Katsavrias, Lynn B. Wilson III, Hadi Madanian, Xóchitl Blanco-Cano, Ian J. Cohen, C. Philippe Escoubet
Abstract: Shock-generated transients, such as hot flow anomalies (HFAs), upstream of planetary bow shocks play a critical role in electron acceleration. Using multi-mission data from NASA's Magnetospheric Multiscale (MMS) and ESA's Cluster missions, we demonstrate the transmission of HFAs through Earth's quasi-parallel bow shock, associated with acceleration of electrons up to relativistic energies. Energetic electrons, initially accelerated upstream, are shown to remain broadly confined within the transmitted transient structures downstream, where betatron acceleration further boosts their energy due to elevated compression levels. Additionally, high-speed jets form at the compressive edges of HFAs, exhibiting a significant increase in dynamic pressure and potentially contributing to driving further localized compression. Our findings emphasize the efficiency of quasi-parallel shocks in driving particle acceleration far beyond the immediate shock transition region, expanding the acceleration region to a larger spatial domain. Finally, this study underscores the importance of a multi-scale observational approach in understanding the convoluted processes behind collisionless shock physics and their broader implications.
Submitted 19 November, 2024; originally announced November 2024.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12644">arXiv:2411.12644</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12644">pdf</a>, <a href="https://arxiv.org/format/2411.12644">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> CodeXEmbed: A Generalist Embedding Model Family for Multiligual and Multi-task Code Retrieval </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Liu%2C+Y">Ye Liu</a>, <a href="/search/?searchtype=author&amp;query=Meng%2C+R">Rui Meng</a>, <a href="/search/?searchtype=author&amp;query=Joty%2C+S">Shafiq Joty</a>, <a href="/search/?searchtype=author&amp;query=Savarese%2C+S">Silvio Savarese</a>, <a href="/search/?searchtype=author&amp;query=Xiong%2C+C">Caiming Xiong</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yingbo Zhou</a>, <a href="/search/?searchtype=author&amp;query=Yavuz%2C+S">Semih Yavuz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12644v2-abstract-short" style="display: inline;"> Despite the success of text retrieval in many NLP tasks, code retrieval remains a largely underexplored area. Most text retrieval systems are tailored for natural language queries, often neglecting the specific challenges of retrieving code. This gap leaves existing models unable to effectively capture the diversity of programming languages and tasks across different domains, highlighting the need&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12644v2-abstract-full').style.display = 'inline'; document.getElementById('2411.12644v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12644v2-abstract-full" style="display: none;"> Despite the success of text retrieval in many NLP tasks, code retrieval remains a largely underexplored area. Most text retrieval systems are tailored for natural language queries, often neglecting the specific challenges of retrieving code. This gap leaves existing models unable to effectively capture the diversity of programming languages and tasks across different domains, highlighting the need for more focused research in code retrieval. To address this, we introduce CodeXEmbed, a family of large-scale code embedding models ranging from 400M to 7B parameters. Our novel training pipeline unifies multiple programming languages and transforms various code-related tasks into a common retrieval framework, enhancing model generalizability and retrieval performance. Our 7B model sets a new state-of-the-art (SOTA) in code retrieval, outperforming the previous leading model, Voyage-Code, by over 20% on CoIR benchmark. In addition to excelling in code retrieval, our models demonstrate competitive performance on the widely adopted BeIR text retrieval benchmark, offering versatility across domains. 
Experimental results demonstrate that improving retrieval performance significantly enhances end-to-end Retrieval-Augmented Generation (RAG) performance for code-related tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12644v2-abstract-full').style.display = 'none'; document.getElementById('2411.12644v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12635">arXiv:2411.12635</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12635">pdf</a>, <a href="https://arxiv.org/format/2411.12635">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> M3D: Dual-Stream Selective State Spaces and Depth-Driven Framework for High-Fidelity Single-View 3D Reconstruction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhang%2C+L">Luoxi Zhang</a>, <a href="/search/?searchtype=author&amp;query=Shrestha%2C+P">Pragyan Shrestha</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yu Zhou</a>, <a href="/search/?searchtype=author&amp;query=Xie%2C+C">Chun Xie</a>, <a href="/search/?searchtype=author&amp;query=Kitahara%2C+I">Itaru Kitahara</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12635v2-abstract-short" style="display: inline;"> The precise reconstruction of 3D objects from a single RGB image in complex scenes presents a critical challenge in virtual reality, autonomous driving, and robotics. Existing neural implicit 3D representation methods face significant difficulties in balancing the extraction of global and local features, particularly in diverse and complex environments, leading to insufficient reconstruction preci&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12635v2-abstract-full').style.display = 'inline'; document.getElementById('2411.12635v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12635v2-abstract-full" style="display: none;"> The precise reconstruction of 3D objects from a single RGB image in complex scenes presents a critical challenge in virtual reality, autonomous driving, and robotics. Existing neural implicit 3D representation methods face significant difficulties in balancing the extraction of global and local features, particularly in diverse and complex environments, leading to insufficient reconstruction precision and quality. We propose M3D, a novel single-view 3D reconstruction framework, to tackle these challenges. This framework adopts a dual-stream feature extraction strategy based on Selective State Spaces to effectively balance the extraction of global and local features, thereby improving scene comprehension and representation precision. 
Additionally, a parallel branch extracts depth information, effectively integrating visual and geometric features to enhance reconstruction quality and preserve intricate details. Experimental results indicate that the fusion of multi-scale features with depth information via the dual-branch feature extraction significantly boosts geometric consistency and fidelity, achieving state-of-the-art reconstruction performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12635v2-abstract-full').style.display = 'none'; document.getElementById('2411.12635v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 4 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.3.5 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12431">arXiv:2411.12431</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12431">pdf</a>, <a href="https://arxiv.org/format/2411.12431">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> CV-Cities: Advancing Cross-View Geo-Localization in Global Cities </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Huang%2C+G">Gaoshuang Huang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yang Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zhao%2C+L">Luying Zhao</a>, <a href="/search/?searchtype=author&amp;query=Gan%2C+W">Wenjian Gan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12431v1-abstract-short" style="display: inline;"> Cross-view geo-localization (CVGL), which involves matching and retrieving satellite images to determine the geographic location of a ground image, is crucial in GNSS-constrained scenarios. However, this task faces significant challenges due to substantial viewpoint discrepancies, the complexity of localization scenarios, and the need for global localization. To address these issues, we propose a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12431v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12431v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12431v1-abstract-full" style="display: none;"> Cross-view geo-localization (CVGL), which involves matching and retrieving satellite images to determine the geographic location of a ground image, is crucial in GNSS-constrained scenarios. 
However, this task faces significant challenges due to substantial viewpoint discrepancies, the complexity of localization scenarios, and the need for global localization. To address these issues, we propose a novel CVGL framework that integrates the vision foundational model DINOv2 with an advanced feature mixer. Our framework introduces the symmetric InfoNCE loss and incorporates near-neighbor sampling and dynamic similarity sampling strategies, significantly enhancing localization accuracy. Experimental results show that our framework surpasses existing methods across multiple public and self-built datasets. To further improve globalscale performance, we have developed CV-Cities, a novel dataset for global CVGL. CV-Cities includes 223,736 ground-satellite image pairs with geolocation data, spanning sixteen cities across six continents and covering a wide range of complex scenarios, providing a challenging benchmark for CVGL. The framework trained with CV-Cities demonstrates high localization accuracy in various test cities, highlighting its strong globalization and generalization capabilities. Our datasets and codes are available at https://github.com/GaoShuang98/CVCities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12431v1-abstract-full').style.display = 'none'; document.getElementById('2411.12431v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Datasets and codes are available, accepted by IEEE JSTARS</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12178">arXiv:2411.12178</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12178">pdf</a>, <a href="https://arxiv.org/format/2411.12178">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> First evidence for direct CP violation in beauty to charmonium decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&amp;query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&amp;query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&amp;query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&amp;query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&amp;query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&amp;query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&amp;query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&amp;query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&amp;query=Aidala%2C+C+A">C. A. 
Aidala</a>, <a href="/search/?searchtype=author&amp;query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&amp;query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&amp;query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&amp;query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&amp;query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&amp;query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&amp;query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&amp;query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&amp;query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&amp;query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&amp;query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&amp;query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&amp;query=Amhis%2C+Y">Y. Amhis</a> , et al. (1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12178v2-abstract-short" style="display: inline;"> The $C\!P$ asymmetry and branching fraction of the CKM-suppressed decay $B^+\!\to J\mskip -3mu/\mskip -2mu\psi\,\pi^+$ are precisely measured relative to the favoured decay $B^+\!\to J\mskip -3mu/\mskip -2mu\psi\,K^+$, using a sample of proton-proton collision data corresponding to an integrated luminosity of $5.4~\mathrm{fb}^{-1}$ recorded at a center-of-mass energy of $13~\mathrm{TeV}$ during 2016--2018.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12178v2-abstract-full').style.display = 'inline'; document.getElementById('2411.12178v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12178v2-abstract-full" style="display: none;"> The $C\!P$ asymmetry and branching fraction of the CKM-suppressed decay $B^+\!\to J\mskip -3mu/\mskip -2mu\psi\,\pi^+$ are precisely measured relative to the favoured decay $B^+\!\to J\mskip -3mu/\mskip -2mu\psi\,K^+$, using a sample of proton-proton collision data corresponding to an integrated luminosity of $5.4~\mathrm{fb}^{-1}$ recorded at a center-of-mass energy of $13~\mathrm{TeV}$ during 2016--2018. The results of the $C\!P$ asymmetry difference and branching fraction ratio are \begin{align*} \Delta\mathcal{A}^{C\!P} &amp;\equiv \mathcal{A}^{C\!P}(B^+ \to J\mskip -3mu/\mskip -2mu\psi\,\pi^+) - \mathcal{A}^{C\!P}(B^+ \to J\mskip -3mu/\mskip -2mu\psi\,K^+) = (1.29 \pm 0.49 \pm 0.08) \times 10^{-2}, \end{align*} \begin{equation*} \mathcal{R}_{\pi/K} \equiv \frac{\mathcal{B}(B^+ \!\to J\mskip -3mu/\mskip -2mu\psi\,\pi^+)}{\mathcal{B}(B^+ \!\to J\mskip -3mu/\mskip -2mu\psi\,K^+)} = (3.852 \pm 0.022 \pm 0.018) \times 10^{-2}, \end{equation*} where the first uncertainties are statistical and the second systematic. A combination with previous LHCb results based on data collected at $7$ and $8~\mathrm{TeV}$ in 2011 and 2012 yields $\Delta\mathcal{A}^{C\!P} = (1.42 \pm 0.43 \pm 0.08) \times 10^{-2}$ and $\mathcal{R}_{\pi/K} = (3.846 \pm 0.018 \pm 0.018) \times 10^{-2}$. The combined $\Delta\mathcal{A}^{C\!P}$ value deviates from zero by 3.2 standard deviations, providing the first evidence for direct $C\!P$ violation in the amplitudes of beauty decays to charmonium final states.
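<p class="is-size-7">As a quick numerical check of the quoted significance, assuming the statistical and systematic uncertainties on the combined value add in quadrature:</p>
<pre><code class="language-python"># Combined Delta A^CP = (1.42 +/- 0.43_stat +/- 0.08_syst) x 10^-2.
# Assuming the two uncertainties are independent and add in quadrature,
# the deviation from zero comes out at about 3.2 standard deviations, as stated.
from math import hypot

value = 1.42e-2
stat, syst = 0.43e-2, 0.08e-2
total_unc = hypot(stat, syst)   # ~0.44e-2
print(value / total_unc)        # ~3.2
</code></pre>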
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12178v2-abstract-full').style.display = 'none'; document.getElementById('2411.12178v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 2 figures, no conference or journal information All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/1623/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-031 CERN-EP-2024-286 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11980">arXiv:2411.11980</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11980">pdf</a>, <a href="https://arxiv.org/format/2411.11980">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Transmission Line Outage Probability Prediction Under Extreme Events Using Peter-Clark Bayesian Structural Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Chen%2C+X">Xiaolin Chen</a>, <a href="/search/?searchtype=author&amp;query=Huang%2C+Q">Qiuhua Huang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuqi Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11980v1-abstract-short" style="display: inline;"> Recent years have seen a notable increase in the frequency and intensity of extreme weather events. With a rising number of power outages caused by these events, accurate prediction of power line outages is essential for safe and reliable operation of power grids. The Bayesian network is a probabilistic model that is very effective for predicting line outages under weather-related uncertainties. H&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11980v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11980v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11980v1-abstract-full" style="display: none;"> Recent years have seen a notable increase in the frequency and intensity of extreme weather events. With a rising number of power outages caused by these events, accurate prediction of power line outages is essential for safe and reliable operation of power grids. The Bayesian network is a probabilistic model that is very effective for predicting line outages under weather-related uncertainties. 
However, most existing studies in this area offer general risk assessments, but fall short of providing specific outage probabilities. In this work, we introduce a novel approach for predicting transmission line outage probabilities using a Bayesian network combined with Peter-Clark (PC) structural learning. Our approach not only enables precise outage probability calculations, but also demonstrates better scalability and robust performance, even with limited data. Case studies using data from BPA and NOAA show the effectiveness of this approach, while comparisons with several existing methods further highlight its advantages. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11980v1-abstract-full').style.display = 'none'; document.getElementById('2411.11980v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11913">arXiv:2411.11913</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11913">pdf</a>, <a href="https://arxiv.org/format/2411.11913">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> On-Board Vision-Language Models for Personalized Autonomous Vehicle Motion Control: System Design and Real-World Validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Cui%2C+C">Can Cui</a>, <a href="/search/?searchtype=author&amp;query=Yang%2C+Z">Zichong Yang</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yupeng Zhou</a>, <a href="/search/?searchtype=author&amp;query=Peng%2C+J">Juntong Peng</a>, <a href="/search/?searchtype=author&amp;query=Park%2C+S">Sung-Yeon Park</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+C">Cong Zhang</a>, <a href="/search/?searchtype=author&amp;query=Ma%2C+Y">Yunsheng Ma</a>, <a href="/search/?searchtype=author&amp;query=Cao%2C+X">Xu Cao</a>, <a href="/search/?searchtype=author&amp;query=Ye%2C+W">Wenqian Ye</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+Y">Yiheng Feng</a>, <a href="/search/?searchtype=author&amp;query=Panchal%2C+J">Jitesh Panchal</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+L">Lingxi Li</a>, <a href="/search/?searchtype=author&amp;query=Chen%2C+Y">Yaobin Chen</a>, <a href="/search/?searchtype=author&amp;query=Wang%2C+Z">Ziran Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11913v1-abstract-short" style="display: inline;"> Personalized driving refers to an autonomous vehicle&#39;s ability to adapt its driving behavior or control strategies to match individual users&#39; preferences and driving styles while maintaining safety and comfort standards. However, existing works either fail to capture every individual preference precisely or become computationally inefficient as the user base expands. 
Vision-Language Models (VLMs)&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11913v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11913v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11913v1-abstract-full" style="display: none;"> Personalized driving refers to an autonomous vehicle&#39;s ability to adapt its driving behavior or control strategies to match individual users&#39; preferences and driving styles while maintaining safety and comfort standards. However, existing works either fail to capture every individual preference precisely or become computationally inefficient as the user base expands. Vision-Language Models (VLMs) offer promising solutions to this front through their natural language understanding and scene reasoning capabilities. In this work, we propose a lightweight yet effective on-board VLM framework that provides low-latency personalized driving performance while maintaining strong reasoning capabilities. Our solution incorporates a Retrieval-Augmented Generation (RAG)-based memory module that enables continuous learning of individual driving preferences through human feedback. Through comprehensive real-world vehicle deployment and experiments, our system has demonstrated the ability to provide safe, comfortable, and personalized driving experiences across various scenarios and significantly reduce takeover rates by up to 76.9%. To the best of our knowledge, this work represents the first end-to-end VLM-based motion control system in real-world autonomous vehicles. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11913v1-abstract-full').style.display = 'none'; document.getElementById('2411.11913v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11904">arXiv:2411.11904</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11904">pdf</a>, <a href="https://arxiv.org/format/2411.11904">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> GeoGround: A Unified Large Vision-Language Model. 
for Remote Sensing Visual Grounding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/?searchtype=author&amp;query=Lan%2C+M">Mengcheng Lan</a>, <a href="/search/?searchtype=author&amp;query=Li%2C+X">Xiang Li</a>, <a href="/search/?searchtype=author&amp;query=Ke%2C+Y">Yiping Ke</a>, <a href="/search/?searchtype=author&amp;query=Jiang%2C+X">Xue Jiang</a>, <a href="/search/?searchtype=author&amp;query=Feng%2C+L">Litong Feng</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+W">Wayne Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11904v1-abstract-short" style="display: inline;"> Remote sensing (RS) visual grounding aims to use natural language expression to locate specific objects (in the form of the bounding box or segmentation mask) in RS images, enhancing human interaction with intelligent RS interpretation systems. Early research in this area was primarily based on horizontal bounding boxes (HBBs), but as more diverse RS datasets have become available, tasks involving&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11904v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11904v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11904v1-abstract-full" style="display: none;"> Remote sensing (RS) visual grounding aims to use natural language expression to locate specific objects (in the form of the bounding box or segmentation mask) in RS images, enhancing human interaction with intelligent RS interpretation systems. Early research in this area was primarily based on horizontal bounding boxes (HBBs), but as more diverse RS datasets have become available, tasks involving oriented bounding boxes (OBBs) and segmentation masks have emerged. In practical applications, different targets require different grounding types: HBB can localize an object&#39;s position, OBB provides its orientation, and mask depicts its shape. However, existing specialized methods are typically tailored to a single type of RS visual grounding task and are hard to generalize across tasks. In contrast, large vision-language models (VLMs) exhibit powerful multi-task learning capabilities but struggle to handle dense prediction tasks like segmentation. This paper proposes GeoGround, a novel framework that unifies support for HBB, OBB, and mask RS visual grounding tasks, allowing flexible output selection. Rather than customizing the architecture of VLM, our work aims to elegantly support pixel-level visual grounding output through the Text-Mask technique. We define prompt-assisted and geometry-guided learning to enhance consistency across different signals. To support model training, we present refGeo, a large-scale RS visual instruction-following dataset containing 161k image-text pairs. Experimental results show that GeoGround demonstrates strong performance across four RS visual grounding tasks, matching or surpassing the performance of specialized methods on multiple benchmarks. 
Code available at https://github.com/zytx121/GeoGround <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11904v1-abstract-full').style.display = 'none'; document.getElementById('2411.11904v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages, 19 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11791">arXiv:2411.11791</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11791">pdf</a>, <a href="https://arxiv.org/format/2411.11791">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Machine Learning-Assisted Distribution System Network Reconfiguration Problem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Asiamah%2C+R">Richard Asiamah</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuqi Zhou</a>, <a href="/search/?searchtype=author&amp;query=Zamzam%2C+A+S">Ahmed S. Zamzam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11791v1-abstract-short" style="display: inline;"> High penetration from volatile renewable energy resources in the grid and the varying nature of loads raise the need for frequent line switching to ensure the efficient operation of electrical distribution networks. Operators must ensure maximum load delivery, reduced losses, and the operation between voltage limits. However, computations to decide the optimal feeder configuration are often comput&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11791v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11791v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11791v1-abstract-full" style="display: none;"> High penetration from volatile renewable energy resources in the grid and the varying nature of loads raise the need for frequent line switching to ensure the efficient operation of electrical distribution networks. Operators must ensure maximum load delivery, reduced losses, and the operation between voltage limits. However, computations to decide the optimal feeder configuration are often computationally expensive and intractable, making it unfavorable for real-time operations. This is mainly due to the existence of binary variables in the network reconfiguration optimization problem. To tackle this issue, we have devised an approach that leverages machine learning techniques to reshape distribution networks featuring multiple substations. This involves predicting the substation responsible for serving each part of the network. Hence, it leaves simple and more tractable Optimal Power Flow problems to be solved. 
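<p class="is-size-7">A minimal sketch of the learning step just described, under placeholder assumptions: a generic classifier predicts the serving substation for each bus, and the per-substation dispatch is left to an ordinary OPF solver. This illustrates the idea only and is not the authors' implementation.</p>
<pre><code class="language-python"># Predict which substation serves each bus, then hand the fixed configuration to
# a continuous OPF per substation. Features, labels, and sizes are placeholders.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(5000, 12))       # per-bus features, e.g. loads and switch states
y = rng.integers(0, 3, size=5000)     # label: index of the serving substation

clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X[:4000], y[:4000])
assignment = clf.predict(X[4000:])    # predicted substation for the remaining buses

# With the binary switching decisions fixed by the prediction, each substation's
# island can be dispatched with a standard Optimal Power Flow solve (omitted here).
</code></pre>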
This method can produce accurate results in a significantly faster time, as demonstrated using the IEEE 37-bus distribution feeder. Compared to traditional optimization-based approaches, a feasible solution is achieved approximately ten times faster for all the tested scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11791v1-abstract-full').style.display = 'none'; document.getElementById('2411.11791v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11648">arXiv:2411.11648</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11648">pdf</a>, <a href="https://arxiv.org/ps/2411.11648">ps</a>, <a href="https://arxiv.org/format/2411.11648">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Phenomenology">hep-ph</span> </div> </div> <p class="title is-5 mathjax"> Evidence for Two Excited $\Omega^{-}$ Hyperons </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=BESIII+Collaboration"> BESIII Collaboration</a>, <a href="/search/?searchtype=author&amp;query=Ablikim%2C+M">M. Ablikim</a>, <a href="/search/?searchtype=author&amp;query=Achasov%2C+M+N">M. N. Achasov</a>, <a href="/search/?searchtype=author&amp;query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&amp;query=Afedulidis%2C+O">O. Afedulidis</a>, <a href="/search/?searchtype=author&amp;query=Ai%2C+X+C">X. C. Ai</a>, <a href="/search/?searchtype=author&amp;query=Aliberti%2C+R">R. Aliberti</a>, <a href="/search/?searchtype=author&amp;query=Amoroso%2C+A">A. Amoroso</a>, <a href="/search/?searchtype=author&amp;query=Bai%2C+Y">Y. Bai</a>, <a href="/search/?searchtype=author&amp;query=Bakina%2C+O">O. Bakina</a>, <a href="/search/?searchtype=author&amp;query=Balossino%2C+I">I. Balossino</a>, <a href="/search/?searchtype=author&amp;query=Ban%2C+Y">Y. Ban</a>, <a href="/search/?searchtype=author&amp;query=Bao%2C+H+-">H. -R. Bao</a>, <a href="/search/?searchtype=author&amp;query=Batozskaya%2C+V">V. Batozskaya</a>, <a href="/search/?searchtype=author&amp;query=Begzsuren%2C+K">K. Begzsuren</a>, <a href="/search/?searchtype=author&amp;query=Berger%2C+N">N. Berger</a>, <a href="/search/?searchtype=author&amp;query=Berlowski%2C+M">M. Berlowski</a>, <a href="/search/?searchtype=author&amp;query=Bertani%2C+M">M. Bertani</a>, <a href="/search/?searchtype=author&amp;query=Bettoni%2C+D">D. Bettoni</a>, <a href="/search/?searchtype=author&amp;query=Bianchi%2C+F">F. Bianchi</a>, <a href="/search/?searchtype=author&amp;query=Bianco%2C+E">E. Bianco</a>, <a href="/search/?searchtype=author&amp;query=Bortone%2C+A">A. Bortone</a>, <a href="/search/?searchtype=author&amp;query=Boyko%2C+I">I. Boyko</a>, <a href="/search/?searchtype=author&amp;query=Briere%2C+R+A">R. A. Briere</a>, <a href="/search/?searchtype=author&amp;query=Brueggemann%2C+A">A. Brueggemann</a> , et al.
(650 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11648v1-abstract-short" style="display: inline;"> Using $e^+e^-$ collision data corresponding to an integrated luminosity of 19 fb$^{-1}$ collected by the BESIII detector at center-of-mass energies ranging from 4.13 to 4.70 GeV, we report the first evidence for a new excited $\Omega^{-}$ hyperon, the $\Omega^*(2109)^{-}$, through the process $e^+ e^- \to \Omega^*(2109)^{-} \bar\Omega^{+} +c.c.$ with a significance of 3.7 $\sigma$. The mass and width of $\Omega^*(2109)^{-}$ ar&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11648v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11648v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11648v1-abstract-full" style="display: none;"> Using $e^+e^-$ collision data corresponding to an integrated luminosity of 19 fb$^{-1}$ collected by the BESIII detector at center-of-mass energies ranging from 4.13 to 4.70 GeV, we report the first evidence for a new excited $\Omega^{-}$ hyperon, the $\Omega^*(2109)^{-}$, through the process $e^+ e^- \to \Omega^*(2109)^{-} \bar\Omega^{+} +c.c.$ with a significance of 3.7 $\sigma$. The mass and width of $\Omega^*(2109)^{-}$ are measured to be $2108.8 \pm 5.5_{\rm stat} \pm 1.5_{\rm syst} {\rm MeV}/c^{2}$ and $21.6 \pm 17.7_{\rm stat} \pm 9.4_{\rm syst} {\rm MeV}$, respectively. We also present evidence for production of the $\Omega^*(2012)^{-}$ in the process $e^+ e^- \to \Omega^*(2012)^{-} \bar\Omega^{+} +c.c.$ with a significance of 3.7 $\sigma$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11648v1-abstract-full').style.display = 'none'; document.getElementById('2411.11648v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11561">arXiv:2411.11561</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11561">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Superconductivity">cond-mat.supr-con</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div> <p class="title is-5 mathjax"> Intertwined effects of elastic deformation and damage on vortex pinning and Jc degradation in polycrystalline superconductors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Wang%2C+Q">Qing-Yu Wang</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+S">Shuai Hu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">You-He Zhou</a>, <a href="/search/?searchtype=author&amp;query=Xue%2C+C">Cun Xue</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11561v3-abstract-short" style="display: inline;"> The damage and the critical current density (Jc) degradation of polycrystalline superconductors induced by strain dramatically influence their performance in applications. Unfortunately, the state-of-the-art experimental techniques are unable to detect the damage of internal polycrystalline structures and the microscopic superconductivity in the presence of strain. We propose a groundbreaking mult&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11561v3-abstract-full').style.display = 'inline'; document.getElementById('2411.11561v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11561v3-abstract-full" style="display: none;"> The damage and the critical current density (Jc) degradation of polycrystalline superconductors induced by strain dramatically influence their performance in applications. Unfortunately, the state-of-the-art experimental techniques are unable to detect the damage of internal polycrystalline structures and the microscopic superconductivity in the presence of strain. We propose a groundbreaking multi-scale theoretical framework aimed at revealing the underlying physical mechanisms of the reversible and irreversible Jc degradation induced by the strain through tackling the complex intertwined effects of elastic deformation and damage on the superconductivity of grain boundaries and the associated vortex pinning. The results are well validated by experimental measurements. Utilizing the benchmarked physical model, we demonstrate that the damage evolutions of polycrystalline superconductors in the presence of strain can be approximately estimated by means of the electromagnetic experiments on Jc. Furthermore, we also discuss the characteristics of damage and Jc degradation of polycrystalline superconductors subjected to biaxial mechanical loads. 
The findings will pave the way to investigate the tunable vortex pinning and Jc of superconductors by strain, and to develop a brand new electromagnetic method to manifest the damage of polycrystalline superconductors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11561v3-abstract-full').style.display = 'none'; document.getElementById('2411.11561v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 4figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11440">arXiv:2411.11440</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11440">pdf</a>, <a href="https://arxiv.org/format/2411.11440">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cosmology and Nongalactic Astrophysics">astro-ph.CO</span> </div> </div> <p class="title is-5 mathjax"> Map-based E/B separation of filtered timestreams using space-based E-mode observations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuyang Zhou</a>, <a href="/search/?searchtype=author&amp;query=Lee%2C+A">Adrian Lee</a>, <a href="/search/?searchtype=author&amp;query=Chinone%2C+Y">Yuji Chinone</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11440v1-abstract-short" style="display: inline;"> E to B mixing or &#34;leakage&#34; due to time-ordered data (TOD) filtering has become an important source of sensitivity loss that ground-based cosmic microwave background polarization experiments must address. However, it is a difficult problem for which very few viable solutions exist. In this paper, we expand upon satellite E-mode methods to cover E/B leakage specifically due to TOD filtering. We take&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11440v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11440v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11440v1-abstract-full" style="display: none;"> E to B mixing or &#34;leakage&#34; due to time-ordered data (TOD) filtering has become an important source of sensitivity loss that ground-based cosmic microwave background polarization experiments must address. However, it is a difficult problem for which very few viable solutions exist. In this paper, we expand upon satellite E-mode methods to cover E/B leakage specifically due to TOD filtering. We take a satellite E-mode map and TOD filter it through the ground-based experiment data analysis pipeline, from which we construct a map-space &#34;leakage template&#34; and subtract it from the ground-based experiment map. 
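<p class="is-size-7">Schematically, the template subtraction amounts to pushing the satellite E-mode map through the same (approximately linear) filter-and-remap operator as the ground-based data and differencing the maps. In the sketch below, filter_like_ground_pipeline is a placeholder for that operator, not the actual analysis pipeline.</p>
<pre><code class="language-python"># Map-space leakage-template subtraction (schematic). The filter stands in for the
# ground-based experiment's TOD filtering and map-making; applying it to a satellite
# E-only map yields the filtering-induced leakage template to subtract.
import numpy as np

def filter_like_ground_pipeline(sky_map):
    # Placeholder: a crude high-pass along one axis, standing in for TOD filtering.
    return sky_map - sky_map.mean(axis=1, keepdims=True)

ground_map = np.random.default_rng(1).normal(size=(256, 256))       # filtered ground map (toy)
satellite_e_map = np.random.default_rng(2).normal(size=(256, 256))  # satellite E-mode map (toy)

leakage_template = filter_like_ground_pipeline(satellite_e_map)
cleaned_map = ground_map - leakage_template   # leakage-cleaned map
</code></pre>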
We evaluate the residual leakage by simulating the satellite E-mode maps with Planck-like and LiteBIRD-like noise levels, and simulate the ground-based experiment with Simons Observatory-like and CMB-S4-like noise levels. The effectiveness of the method is measured in the improvement of the Fisher uncertainty $\sigma(r=0)$. We find that our method can reduce $\sigma(r=0)$ by $\sim15\text{--}75\%$ depending on the noise levels considered. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11440v1-abstract-full').style.display = 'none'; document.getElementById('2411.11440v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 15 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11364">arXiv:2411.11364</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11364">pdf</a>, <a href="https://arxiv.org/format/2411.11364">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Continual Task Learning through Adaptive Policy Self-Composition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&amp;query=Hu%2C+S">Shengchao Hu</a>, <a href="/search/?searchtype=author&amp;query=Zhou%2C+Y">Yuhang Zhou</a>, <a href="/search/?searchtype=author&amp;query=Fan%2C+Z">Ziqing Fan</a>, <a href="/search/?searchtype=author&amp;query=Hu%2C+J">Jifeng Hu</a>, <a href="/search/?searchtype=author&amp;query=Shen%2C+L">Li Shen</a>, <a href="/search/?searchtype=author&amp;query=Zhang%2C+Y">Ya Zhang</a>, <a href="/search/?searchtype=author&amp;query=Tao%2C+D">Dacheng Tao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11364v1-abstract-short" style="display: inline;"> Training a generalizable agent to continually learn a sequence of tasks from offline trajectories is a natural requirement for long-lived agents, yet remains a significant challenge for current offline reinforcement learning (RL) algorithms. Specifically, an agent must be able to rapidly adapt to new tasks using newly collected trajectories (plasticity), while retaining knowledge from previously l&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11364v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11364v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11364v1-abstract-full" style="display: none;"> Training a generalizable agent to continually learn a sequence of tasks from offline trajectories is a natural requirement for long-lived agents, yet remains a significant challenge for current offline reinforcement learning (RL) algorithms.
Specifically, an agent must be able to rapidly adapt to new tasks using newly collected trajectories (plasticity), while retaining knowledge from previously learned tasks (stability). However, systematic analyses of this setting are scarce, and it remains unclear whether conventional continual learning (CL) methods are effective in continual offline RL (CORL) scenarios. In this study, we develop the Offline Continual World benchmark and demonstrate that traditional CL methods struggle with catastrophic forgetting, primarily due to the unique distribution shifts inherent to CORL scenarios. To address this challenge, we introduce CompoFormer, a structure-based continual transformer model that adaptively composes previous policies via a meta-policy network. Upon encountering a new task, CompoFormer leverages semantic correlations to selectively integrate relevant prior policies alongside newly trained parameters, thereby enhancing knowledge sharing and accelerating the learning process. Our experiments reveal that CompoFormer outperforms conventional CL methods, particularly in longer task sequences, showcasing a promising balance between plasticity and stability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11364v1-abstract-full').style.display = 'none'; document.getElementById('2411.11364v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 8 figures</span> </p> </li> </ol> </div> </main> </body> </html>
<div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 
47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10