CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–50 of 1,873 results for author: <span class="mathjax">Zheng, Z</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/" aria-role="search"> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Zheng, Z"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input 
checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zheng%2C+Z&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zheng, Z"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option 
selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">…</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a 
href="https://arxiv.org/abs/2502.17759">arXiv:2502.17759</a> <span> [<a href="https://arxiv.org/pdf/2502.17759">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Label-free Prediction of Vascular Connectivity in Perfused Microvascular Networks in vitro </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Xu%2C+L">Liang Xu</a>, <a href="/search/?searchtype=author&query=Song%2C+P">Pengwu Song</a>, <a href="/search/?searchtype=author&query=Zhu%2C+S">Shilu Zhu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+Y">Yang Zhang</a>, <a href="/search/?searchtype=author&query=Zhang%2C+R">Ru Zhang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhiyuan Zheng</a>, <a href="/search/?searchtype=author&query=Zhang%2C+Q">Qingdong Zhang</a>, <a href="/search/?searchtype=author&query=Gao%2C+J">Jie Gao</a>, <a href="/search/?searchtype=author&query=Han%2C+C">Chen Han</a>, <a href="/search/?searchtype=author&query=Sun%2C+M">Mingzhai Sun</a>, <a href="/search/?searchtype=author&query=Yao%2C+P">Peng Yao</a>, <a href="/search/?searchtype=author&query=Ye%2C+M">Min Ye</a>, <a href="/search/?searchtype=author&query=Xu%2C+R+X">Ronald X. Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.17759v1-abstract-short" style="display: inline;"> Continuous monitoring and in-situ assessment of microvascular connectivity have significant implications for culturing vascularized organoids and optimizing the therapeutic strategies. 
However, commonly used methods for vascular connectivity assessment heavily rely on fluorescent labels that may either raise biocompatibility concerns or interrupt the normal cell growth process. To address this iss… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.17759v1-abstract-full').style.display = 'inline'; document.getElementById('2502.17759v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.17759v1-abstract-full" style="display: none;"> Continuous monitoring and in-situ assessment of microvascular connectivity have significant implications for culturing vascularized organoids and optimizing the therapeutic strategies. However, commonly used methods for vascular connectivity assessment heavily rely on fluorescent labels that may either raise biocompatibility concerns or interrupt the normal cell growth process. To address this issue, a Vessel Connectivity Network (VC-Net) was developed for label-free assessment of vascular connectivity. To validate the VC-Net, microvascular networks (MVNs) were cultured in vitro and their microscopic images were acquired at different culturing conditions as a training dataset. The VC-Net employs a Vessel Queue Contrastive Learning (VQCL) method and a class imbalance algorithm to address the issues of limited sample size, indistinctive class features and imbalanced class distribution in the dataset. The VC-Net successfully evaluated the vascular connectivity with no significant deviation from that by fluorescence imaging. In addition, the proposed VC-Net successfully differentiated the connectivity characteristics between normal and tumor-related MVNs. In comparison with those cultured in the regular microenvironment, the averaged connectivity of MVNs cultured in the tumor-related microenvironment decreased by 30.8%, whereas the non-connected area increased by 37.3%. 
This study provides a new avenue for label-free and continuous assessment of organoid or tumor vascularization in vitro. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.17759v1-abstract-full').style.display = 'none'; document.getElementById('2502.17759v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.15569">arXiv:2502.15569</a> <span> [<a href="https://arxiv.org/pdf/2502.15569">pdf</a>, <a href="https://arxiv.org/format/2502.15569">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Astrophysics of Galaxies">astro-ph.GA</span> </div> </div> <p class="title is-5 mathjax"> The Evolution of Size and Merger Fraction of Submillimeter Galaxies across $1 < z \lesssim 6$ as Observed by JWST </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Ren%2C+J">Jian Ren</a>, <a href="/search/?searchtype=author&query=Liu%2C+F+S">F. S. Liu</a>, <a href="/search/?searchtype=author&query=Li%2C+N">Nan Li</a>, <a href="/search/?searchtype=author&query=Zhao%2C+P">Pinsong Zhao</a>, <a href="/search/?searchtype=author&query=Cui%2C+Q">Qifan Cui</a>, <a href="/search/?searchtype=author&query=Song%2C+Q">Qi Song</a>, <a href="/search/?searchtype=author&query=Li%2C+Y">Yubin Li</a>, <a href="/search/?searchtype=author&query=Mo%2C+H">Hao Mo</a>, <a href="/search/?searchtype=author&query=Yesuf%2C+H+M">Hassen M. 
Yesuf</a>, <a href="/search/?searchtype=author&query=Wang%2C+W">Weichen Wang</a>, <a href="/search/?searchtype=author&query=An%2C+F">Fangxia An</a>, <a href="/search/?searchtype=author&query=Zheng%2C+X+Z">Xian Zhong Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.15569v1-abstract-short" style="display: inline;"> Precise tracking of the growth in galaxy size and the evolution of merger fractions with redshift is vital for understanding the formation history of submillimeter galaxies (SMGs). This study investigates these evolutions over a broad redshift range ($1 < z \lesssim 6$), using a sample of 222 SMGs with a median redshift of $z = 2.61^{+0.89}_{-0.82}$ identified by ALMA and JCMT, enhanced by the adv… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.15569v1-abstract-full').style.display = 'inline'; document.getElementById('2502.15569v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.15569v1-abstract-full" style="display: none;"> Precise tracking of the growth in galaxy size and the evolution of merger fractions with redshift is vital for understanding the formation history of submillimeter galaxies (SMGs). This study investigates these evolutions over a broad redshift range ($1 < z \lesssim 6$), using a sample of 222 SMGs with a median redshift of $z = 2.61^{+0.89}_{-0.82}$ identified by ALMA and JCMT, enhanced by the advanced imaging capabilities of the JWST/NIRCam and MIRI. We find significant evolution in effective radii ($R_e$) in rest-frame V-band ($R_e \propto (1 + z)^{-0.87 \pm 0.08}$) and near-infrared (NIR) band ($R_e \propto (1 + z)^{-0.88 \pm 0.11}$), with the NIR size evolution resembling that of massive star-forming galaxies at lower redshift. 
Visual inspections reveal a major merger fraction of $24.3 \pm 3.7\%$ and an interaction fraction of up to $48.4 \pm 11.1\%$. The major merger fraction exhibits an increase from 14.7$\pm9.1$\% at $z = 1$ to 26.6$\pm 8.4$\% at $z = 3$, after which it remains approximately constant across the redshift range $3 < z < 6$. In contrast, the interaction fraction remains relatively stable across the range $2 < z < 5$. Our results indicate that late-stage major mergers are not the primary formation mechanism for SMGs at $z<3$, while interactions appear to play a significant role across the broader redshift range of $1<z<6$. Additionally, HST-based major merger identifications may overestimate the true fraction by a factor of 1.7 at $z \sim 2$. These findings highlight the varying roles of mergers and interactions in driving the formation of massive, dusty star-forming galaxies across different redshifts. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.15569v1-abstract-full').style.display = 'none'; document.getElementById('2502.15569v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 9 figures, accepted for publication in the Astrophysical Journal</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.15163">arXiv:2502.15163</a> <span> [<a href="https://arxiv.org/pdf/2502.15163">pdf</a>, <a href="https://arxiv.org/format/2502.15163">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> HOpenCls: Training Hyperspectral Image Open-Set Classifiers in Their Living Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhao%2C+H">Hengwei Zhao</a>, <a href="/search/?searchtype=author&query=Wang%2C+X">Xinyu Wang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhuo Zheng</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jingtao Li</a>, <a href="/search/?searchtype=author&query=Zhong%2C+Y">Yanfei Zhong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.15163v1-abstract-short" style="display: inline;"> Hyperspectral image (HSI) open-set classification is critical for HSI classification models deployed in real-world environments, where classifiers must simultaneously classify known classes and reject unknown classes. Recent methods utilize auxiliary unknown classes data to improve classification performance. 
However, the auxiliary unknown classes data is strongly assumed to be completely separabl… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.15163v1-abstract-full').style.display = 'inline'; document.getElementById('2502.15163v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.15163v1-abstract-full" style="display: none;"> Hyperspectral image (HSI) open-set classification is critical for HSI classification models deployed in real-world environments, where classifiers must simultaneously classify known classes and reject unknown classes. Recent methods utilize auxiliary unknown classes data to improve classification performance. However, the auxiliary unknown classes data is strongly assumed to be completely separable from known classes and requires labor-intensive annotation. To address this limitation, this paper proposes a novel framework, HOpenCls, to leverage the unlabeled wild data-that is the mixture of known and unknown classes. Such wild data is abundant and can be collected freely during deploying classifiers in their living environments. The key insight is reformulating the open-set HSI classification with unlabeled wild data as a positive-unlabeled (PU) learning problem. Specifically, the multi-label strategy is introduced to bridge the PU learning and open-set HSI classification, and then the proposed gradient contraction and gradient expansion module to make this PU learning problem tractable from the observation of abnormal gradient weights associated with wild data. Extensive experiment results demonstrate that incorporating wild data has the potential to significantly enhance open-set HSI classification in complex real-world scenarios. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.15163v1-abstract-full').style.display = 'none'; document.getElementById('2502.15163v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.14776">arXiv:2502.14776</a> <span> [<a href="https://arxiv.org/pdf/2502.14776">pdf</a>, <a href="https://arxiv.org/format/2502.14776">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> SurveyX: Academic Survey Automation via Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Liang%2C+X">Xun Liang</a>, <a href="/search/?searchtype=author&query=Yang%2C+J">Jiawei Yang</a>, <a href="/search/?searchtype=author&query=Wang%2C+Y">Yezhaohui Wang</a>, <a href="/search/?searchtype=author&query=Tang%2C+C">Chen Tang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zifan Zheng</a>, <a href="/search/?searchtype=author&query=Niu%2C+S">Simin Niu</a>, <a href="/search/?searchtype=author&query=Song%2C+S">Shichao Song</a>, <a href="/search/?searchtype=author&query=Wang%2C+H">Hanyu Wang</a>, <a href="/search/?searchtype=author&query=Tang%2C+B">Bo Tang</a>, <a href="/search/?searchtype=author&query=Xiong%2C+F">Feiyu Xiong</a>, <a href="/search/?searchtype=author&query=Mao%2C+K">Keming Mao</a>, <a href="/search/?searchtype=author&query=li%2C+Z">Zhiyu li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.14776v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) have demonstrated exceptional comprehension capabilities and a vast knowledge base, suggesting that LLMs can serve as efficient tools for automated survey generation. However, recent research related to automated survey generation remains constrained by some critical limitations like finite context window, lack of in-depth content discussion, and absence of systematic… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.14776v1-abstract-full').style.display = 'inline'; document.getElementById('2502.14776v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.14776v1-abstract-full" style="display: none;"> Large Language Models (LLMs) have demonstrated exceptional comprehension capabilities and a vast knowledge base, suggesting that LLMs can serve as efficient tools for automated survey generation. However, recent research related to automated survey generation remains constrained by some critical limitations like finite context window, lack of in-depth content discussion, and absence of systematic evaluation frameworks. Inspired by human writing processes, we propose SurveyX, an efficient and organized system for automated survey generation that decomposes the survey composing process into two phases: the Preparation and Generation phases. By innovatively introducing online reference retrieval, a pre-processing method called AttributeTree, and a re-polishing process, SurveyX significantly enhances the efficacy of survey composition. 
Experimental evaluation results show that SurveyX outperforms existing automated survey generation systems in content quality (0.259 improvement) and citation quality (1.76 enhancement), approaching human expert performance across multiple evaluation dimensions. Examples of surveys generated by SurveyX are available on www.surveyx.cn <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.14776v1-abstract-full').style.display = 'none'; document.getElementById('2502.14776v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 16 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.14644">arXiv:2502.14644</a> <span> [<a href="https://arxiv.org/pdf/2502.14644">pdf</a>, <a href="https://arxiv.org/format/2502.14644">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> LIFT: Improving Long Context Understanding of Large Language Models through Long Input Fine-Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Mao%2C+Y">Yansheng Mao</a>, <a href="/search/?searchtype=author&query=Xu%2C+Y">Yufei Xu</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jiaqi Li</a>, <a href="/search/?searchtype=author&query=Meng%2C+F">Fanxu Meng</a>, <a href="/search/?searchtype=author&query=Yang%2C+H">Haotong Yang</a>, <a 
href="/search/?searchtype=author&query=Zheng%2C+Z">Zilong Zheng</a>, <a href="/search/?searchtype=author&query=Wang%2C+X">Xiyuan Wang</a>, <a href="/search/?searchtype=author&query=Zhang%2C+M">Muhan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.14644v1-abstract-short" style="display: inline;"> Long context understanding remains challenging for large language models due to their limited context windows. This paper presents Long Input Fine-Tuning (LIFT), a novel framework for long-context modeling that can improve the long-context performance of arbitrary (short-context) LLMs by dynamically adapting model parameters based on the long input. Importantly, LIFT, rather than endlessly extendi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.14644v1-abstract-full').style.display = 'inline'; document.getElementById('2502.14644v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.14644v1-abstract-full" style="display: none;"> Long context understanding remains challenging for large language models due to their limited context windows. This paper presents Long Input Fine-Tuning (LIFT), a novel framework for long-context modeling that can improve the long-context performance of arbitrary (short-context) LLMs by dynamically adapting model parameters based on the long input. Importantly, LIFT, rather than endlessly extending the context window size to accommodate increasingly longer inputs in context, chooses to store and absorb the long input in parameter. By fine-tuning the long input into model parameters, LIFT allows short-context LLMs to answer questions even when the required information is not provided in the context during inference. 
Furthermore, to enhance LIFT performance while maintaining the original in-context learning (ICL) capabilities, we introduce Gated Memory, a specialized attention adapter that automatically balances long input memorization and ICL. We provide a comprehensive analysis of the strengths and limitations of LIFT on long context understanding, offering valuable directions for future research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.14644v1-abstract-full').style.display = 'none'; document.getElementById('2502.14644v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2412.13626</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.14279">arXiv:2502.14279</a> <span> [<a href="https://arxiv.org/pdf/2502.14279">pdf</a>, <a href="https://arxiv.org/format/2502.14279">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> OrchardDepth: Precise Metric Depth Estimation of Orchard Scene from Monocular Camera Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhichao Zheng</a>, <a href="/search/?searchtype=author&query=Williams%2C+H">Henry Williams</a>, <a href="/search/?searchtype=author&query=MacDonald%2C+B+A">Bruce A MacDonald</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.14279v1-abstract-short" style="display: inline;"> Monocular depth estimation is a rudimentary task in robotic perception. Recently, with the development of more accurate and robust neural network models and different types of datasets, monocular depth estimation has significantly improved performance and efficiency. However, most of the research in this area focuses on very concentrated domains. In particular, most of the benchmarks in outdoor sc… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.14279v1-abstract-full').style.display = 'inline'; document.getElementById('2502.14279v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.14279v1-abstract-full" style="display: none;"> Monocular depth estimation is a rudimentary task in robotic perception. Recently, with the development of more accurate and robust neural network models and different types of datasets, monocular depth estimation has significantly improved performance and efficiency. However, most of the research in this area focuses on very concentrated domains. In particular, most of the benchmarks in outdoor scenarios belong to urban environments for the improvement of autonomous driving devices, and these benchmarks have a massive disparity with the orchard/vineyard environment, which is hardly helpful for research in the primary industry. Therefore, we propose OrchardDepth, which fills the gap in the estimation of the metric depth of the monocular camera in the orchard/vineyard environment. In addition, we present a new retraining method to improve the training result by monitoring the consistent regularization between dense depth maps and sparse points. 
Our method improves the RMSE of depth estimation in the orchard environment from 1.5337 to 0.6738, proving our method's validity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.14279v1-abstract-full').style.display = 'none'; document.getElementById('2502.14279v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 5 figures, Australasian Conference on Robotics and Automation, ACRA, 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.13562">arXiv:2502.13562</a> <span> [<a href="https://arxiv.org/pdf/2502.13562">pdf</a>, <a href="https://arxiv.org/format/2502.13562">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Are Large Language Models In-Context Graph Learners? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Li%2C+J">Jintang Li</a>, <a href="/search/?searchtype=author&query=Wu%2C+R">Ruofan Wu</a>, <a href="/search/?searchtype=author&query=Zhu%2C+Y">Yuchang Zhu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+H">Huizhe Zhang</a>, <a href="/search/?searchtype=author&query=Chen%2C+L">Liang Chen</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zibin Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.13562v1-abstract-short" style="display: inline;"> Large language models (LLMs) have demonstrated remarkable in-context reasoning capabilities across a wide range of tasks, particularly with unstructured inputs such as language or images. However, LLMs struggle to handle structured data, such as graphs, due to their lack of understanding of non-Euclidean structures. As a result, without additional fine-tuning, their performance significantly lags… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.13562v1-abstract-full').style.display = 'inline'; document.getElementById('2502.13562v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.13562v1-abstract-full" style="display: none;"> Large language models (LLMs) have demonstrated remarkable in-context reasoning capabilities across a wide range of tasks, particularly with unstructured inputs such as language or images. However, LLMs struggle to handle structured data, such as graphs, due to their lack of understanding of non-Euclidean structures. As a result, without additional fine-tuning, their performance significantly lags behind that of graph neural networks (GNNs) in graph learning tasks. 
In this paper, we show that learning on graph data can be conceptualized as a retrieval-augmented generation (RAG) process, where specific instances (e.g., nodes or edges) act as queries, and the graph itself serves as the retrieved context. Building on this insight, we propose a series of RAG frameworks to enhance the in-context learning capabilities of LLMs for graph learning tasks. Comprehensive evaluations demonstrate that our proposed RAG frameworks significantly improve LLM performance on graph-based tasks, particularly in scenarios where a pretrained LLM must be used without modification or accessed via an API. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.13562v1-abstract-full').style.display = 'none'; document.getElementById('2502.13562v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint, under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.12532">arXiv:2502.12532</a> <span> [<a href="https://arxiv.org/pdf/2502.12532">pdf</a>, <a href="https://arxiv.org/format/2502.12532">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> CityEQA: A Hierarchical LLM Agent on Embodied Question Answering Benchmark in City Space </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhao%2C+Y">Yong Zhao</a>, <a href="/search/?searchtype=author&query=Xu%2C+K">Kai Xu</a>, <a href="/search/?searchtype=author&query=Zhu%2C+Z">Zhengqiu Zhu</a>, <a href="/search/?searchtype=author&query=Hu%2C+Y">Yue Hu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhiheng Zheng</a>, <a href="/search/?searchtype=author&query=Chen%2C+Y">Yingfeng Chen</a>, <a href="/search/?searchtype=author&query=Ji%2C+Y">Yatai Ji</a>, <a href="/search/?searchtype=author&query=Gao%2C+C">Chen Gao</a>, <a href="/search/?searchtype=author&query=Li%2C+Y">Yong Li</a>, <a href="/search/?searchtype=author&query=Huang%2C+J">Jincai Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.12532v2-abstract-short" style="display: inline;"> Embodied Question Answering (EQA) has primarily focused on indoor environments, leaving the complexities of urban settings - spanning environment, action, and perception - largely unexplored. 
To bridge this gap, we introduce CityEQA, a new task where an embodied agent answers open-vocabulary questions through active exploration in dynamic city spaces. To support this task, we present CityEQA-EC, t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12532v2-abstract-full').style.display = 'inline'; document.getElementById('2502.12532v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.12532v2-abstract-full" style="display: none;"> Embodied Question Answering (EQA) has primarily focused on indoor environments, leaving the complexities of urban settings - spanning environment, action, and perception - largely unexplored. To bridge this gap, we introduce CityEQA, a new task where an embodied agent answers open-vocabulary questions through active exploration in dynamic city spaces. To support this task, we present CityEQA-EC, the first benchmark dataset featuring 1,412 human-annotated tasks across six categories, grounded in a realistic 3D urban simulator. Moreover, we propose Planner-Manager-Actor (PMA), a novel agent tailored for CityEQA. PMA enables long-horizon planning and hierarchical task execution: the Planner breaks down the question answering into sub-tasks, the Manager maintains an object-centric cognitive map for spatial reasoning during the process control, and the specialized Actors handle navigation, exploration, and collection sub-tasks. Experiments demonstrate that PMA achieves 60.7% of human-level answering accuracy, significantly outperforming frontier-based baselines. While promising, the performance gap compared to humans highlights the need for enhanced visual reasoning in CityEQA. This work paves the way for future advancements in urban spatial intelligence. Dataset and code are available at https://github.com/BiluYong/CityEQA.git. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12532v2-abstract-full').style.display = 'none'; document.getElementById('2502.12532v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.12479">arXiv:2502.12479</a> <span> [<a href="https://arxiv.org/pdf/2502.12479">pdf</a>, <a href="https://arxiv.org/format/2502.12479">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> </div> </div> <p class="title is-5 mathjax"> MotifBench: A standardized protein design benchmark for motif-scaffolding problems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhuoqi Zheng</a>, <a href="/search/?searchtype=author&query=Zhang%2C+B">Bo Zhang</a>, <a href="/search/?searchtype=author&query=Didi%2C+K">Kieran Didi</a>, <a href="/search/?searchtype=author&query=Yang%2C+K+K">Kevin K. Yang</a>, <a href="/search/?searchtype=author&query=Yim%2C+J">Jason Yim</a>, <a href="/search/?searchtype=author&query=Watson%2C+J+L">Joseph L. Watson</a>, <a href="/search/?searchtype=author&query=Chen%2C+H">Hai-Feng Chen</a>, <a href="/search/?searchtype=author&query=Trippe%2C+B+L">Brian L. 
Trippe</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.12479v2-abstract-short" style="display: inline;"> The motif-scaffolding problem is a central task in computational protein design: Given the coordinates of atoms in a geometry chosen to confer a desired biochemical function (a motif), the task is to identify diverse protein structures (scaffolds) that include the motif and maintain its geometry. Significant recent progress on motif-scaffolding has been made due to computational evaluation with re… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12479v2-abstract-full').style.display = 'inline'; document.getElementById('2502.12479v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.12479v2-abstract-full" style="display: none;"> The motif-scaffolding problem is a central task in computational protein design: Given the coordinates of atoms in a geometry chosen to confer a desired biochemical function (a motif), the task is to identify diverse protein structures (scaffolds) that include the motif and maintain its geometry. Significant recent progress on motif-scaffolding has been made due to computational evaluation with reliable protein structure prediction and fixed-backbone sequence design methods. However, significant variability in evaluation strategies across publications has hindered comparability of results, challenged reproducibility, and impeded robust progress. In response we introduce MotifBench, comprising (1) a precisely specified pipeline and evaluation metrics, (2) a collection of 30 benchmark problems, and (3) an implementation of this benchmark and leaderboard at github.com/blt2114/MotifBench. 
The MotifBench test cases are more difficult compared to earlier benchmarks, and include protein design problems for which solutions are known but on which, to the best of our knowledge, state-of-the-art methods fail to identify any solution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12479v2-abstract-full').style.display = 'none'; document.getElementById('2502.12479v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Associated content available at github.com/blt2114/MotifBench</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.12391">arXiv:2502.12391</a> <span> [<a href="https://arxiv.org/pdf/2502.12391">pdf</a>, <a href="https://arxiv.org/format/2502.12391">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Reward-Safety Balance in Offline Safe RL via Diffusion Regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Guo%2C+J">Junyu Guo</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhi Zheng</a>, <a href="/search/?searchtype=author&query=Ying%2C+D">Donghao Ying</a>, <a href="/search/?searchtype=author&query=Jin%2C+M">Ming Jin</a>, <a href="/search/?searchtype=author&query=Gu%2C+S">Shangding Gu</a>, <a 
href="/search/?searchtype=author&query=Spanos%2C+C">Costas Spanos</a>, <a href="/search/?searchtype=author&query=Lavaei%2C+J">Javad Lavaei</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.12391v1-abstract-short" style="display: inline;"> Constrained reinforcement learning (RL) seeks high-performance policies under safety constraints. We focus on an offline setting where the agent has only a fixed dataset -- common in realistic tasks to prevent unsafe exploration. To address this, we propose Diffusion-Regularized Constrained Offline Reinforcement Learning (DRCORL), which first uses a diffusion model to capture the behavioral policy… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12391v1-abstract-full').style.display = 'inline'; document.getElementById('2502.12391v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.12391v1-abstract-full" style="display: none;"> Constrained reinforcement learning (RL) seeks high-performance policies under safety constraints. We focus on an offline setting where the agent has only a fixed dataset -- common in realistic tasks to prevent unsafe exploration. To address this, we propose Diffusion-Regularized Constrained Offline Reinforcement Learning (DRCORL), which first uses a diffusion model to capture the behavioral policy from offline data and then extracts a simplified policy to enable efficient inference. We further apply gradient manipulation for safety adaptation, balancing the reward objective and constraint satisfaction. This approach leverages high-quality offline data while incorporating safety requirements. Empirical results show that DRCORL achieves reliable safety performance, fast inference, and strong reward outcomes across robot learning tasks. 
Compared to existing safe offline RL methods, it consistently meets cost limits and performs well with the same hyperparameters, indicating practical applicability in real-world scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12391v1-abstract-full').style.display = 'none'; document.getElementById('2502.12391v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.12224">arXiv:2502.12224</a> <span> [<a href="https://arxiv.org/pdf/2502.12224">pdf</a>, <a href="https://arxiv.org/format/2502.12224">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Accurate Expert Predictions in MoE Inference via Cross-Layer Gate </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Fang%2C+Z">Zhiyuan Fang</a>, <a href="/search/?searchtype=author&query=Hong%2C+Z">Zicong Hong</a>, <a href="/search/?searchtype=author&query=Huang%2C+Y">Yuegui Huang</a>, <a href="/search/?searchtype=author&query=Lyu%2C+Y">Yufeng Lyu</a>, <a href="/search/?searchtype=author&query=Chen%2C+W">Wuhui Chen</a>, <a href="/search/?searchtype=author&query=Yu%2C+Y">Yue Yu</a>, <a href="/search/?searchtype=author&query=Yu%2C+F">Fan Yu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zibin Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.12224v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) have demonstrated impressive performance across various tasks, and their application in edge scenarios has attracted significant attention. However, sparse-activated Mixture-of-Experts (MoE) models, which are well suited for edge scenarios, have received relatively little attention due to their high memory demands. Offload-based methods have been proposed to address th… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12224v1-abstract-full').style.display = 'inline'; document.getElementById('2502.12224v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.12224v1-abstract-full" style="display: none;"> Large Language Models (LLMs) have demonstrated impressive performance across various tasks, and their application in edge scenarios has attracted significant attention. However, sparse-activated Mixture-of-Experts (MoE) models, which are well suited for edge scenarios, have received relatively little attention due to their high memory demands. Offload-based methods have been proposed to address this challenge, but they face difficulties with expert prediction. Inaccurate expert predictions can result in prolonged inference delays. To promote the application of MoE models in edge scenarios, we propose Fate, an offloading system designed for MoE models to enable efficient inference in resource-constrained environments. The key insight behind Fate is that gate inputs from adjacent layers can be effectively used for expert prefetching, achieving high prediction accuracy without additional GPU overhead. Furthermore, Fate employs a shallow-favoring expert caching strategy that increases the expert hit rate to 99%. 
Additionally, Fate integrates tailored quantization strategies for cache optimization and IO efficiency. Experimental results show that, compared to Load on Demand and Expert Activation Path-based method, Fate achieves up to 4.5x and 1.9x speedups in prefill speed and up to 4.1x and 2.2x speedups in decoding speed, respectively, while maintaining inference quality. Moreover, Fate's performance improvements are scalable across different memory budgets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12224v1-abstract-full').style.display = 'none'; document.getElementById('2502.12224v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.12176">arXiv:2502.12176</a> <span> [<a href="https://arxiv.org/pdf/2502.12176">pdf</a>, <a href="https://arxiv.org/format/2502.12176">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Ten Challenging Problems in Federated Foundation Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Fan%2C+T">Tao Fan</a>, <a href="/search/?searchtype=author&query=Gu%2C+H">Hanlin Gu</a>, <a href="/search/?searchtype=author&query=Cao%2C+X">Xuemei Cao</a>, <a href="/search/?searchtype=author&query=Chan%2C+C+S">Chee Seng Chan</a>, <a href="/search/?searchtype=author&query=Chen%2C+Q">Qian Chen</a>, <a 
href="/search/?searchtype=author&query=Chen%2C+Y">Yiqiang Chen</a>, <a href="/search/?searchtype=author&query=Feng%2C+Y">Yihui Feng</a>, <a href="/search/?searchtype=author&query=Gu%2C+Y">Yang Gu</a>, <a href="/search/?searchtype=author&query=Geng%2C+J">Jiaxiang Geng</a>, <a href="/search/?searchtype=author&query=Luo%2C+B">Bing Luo</a>, <a href="/search/?searchtype=author&query=Liu%2C+S">Shuoling Liu</a>, <a href="/search/?searchtype=author&query=Ong%2C+W+K">Win Kent Ong</a>, <a href="/search/?searchtype=author&query=Ren%2C+C">Chao Ren</a>, <a href="/search/?searchtype=author&query=Shao%2C+J">Jiaqi Shao</a>, <a href="/search/?searchtype=author&query=Sun%2C+C">Chuan Sun</a>, <a href="/search/?searchtype=author&query=Tang%2C+X">Xiaoli Tang</a>, <a href="/search/?searchtype=author&query=Tae%2C+H+X">Hong Xi Tae</a>, <a href="/search/?searchtype=author&query=Tong%2C+Y">Yongxin Tong</a>, <a href="/search/?searchtype=author&query=Wei%2C+S">Shuyue Wei</a>, <a href="/search/?searchtype=author&query=Wu%2C+F">Fan Wu</a>, <a href="/search/?searchtype=author&query=Xi%2C+W">Wei Xi</a>, <a href="/search/?searchtype=author&query=Xu%2C+M">Mingcong Xu</a>, <a href="/search/?searchtype=author&query=Yang%2C+H">He Yang</a>, <a href="/search/?searchtype=author&query=Yang%2C+X">Xin Yang</a>, <a href="/search/?searchtype=author&query=Yan%2C+J">Jiangpeng Yan</a> , et al. (8 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.12176v1-abstract-short" style="display: inline;"> Federated Foundation Models (FedFMs) represent a distributed learning paradigm that fuses general competences of foundation models as well as privacy-preserving capabilities of federated learning. This combination allows the large foundation models and the small local domain models at the remote clients to learn from each other in a teacher-student learning setting. 
This paper provides a comprehen… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12176v1-abstract-full').style.display = 'inline'; document.getElementById('2502.12176v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.12176v1-abstract-full" style="display: none;"> Federated Foundation Models (FedFMs) represent a distributed learning paradigm that fuses general competences of foundation models as well as privacy-preserving capabilities of federated learning. This combination allows the large foundation models and the small local domain models at the remote clients to learn from each other in a teacher-student learning setting. This paper provides a comprehensive summary of the ten challenging problems inherent in FedFMs, encompassing foundational theory, utilization of private data, continual learning, unlearning, Non-IID and graph data, bidirectional knowledge transfer, incentive mechanism design, game mechanism design, model watermarking, and efficiency. The ten challenging problems manifest in five pivotal aspects: "Foundational Theory," which aims to establish a coherent and unifying theoretical framework for FedFMs. "Data," addressing the difficulties in leveraging domain-specific knowledge from private data while maintaining privacy; "Heterogeneity," examining variations in data, model, and computational resources across clients; "Security and Privacy," focusing on defenses against malicious attacks and model theft; and "Efficiency," highlighting the need for improvements in training, communication, and parameter efficiency. For each problem, we offer a clear mathematical definition on the objective function, analyze existing methods, and discuss the key challenges and potential solutions. 
This in-depth exploration aims to advance the theoretical foundations of FedFMs, guide practical implementations, and inspire future research to overcome these obstacles, thereby enabling the robust, efficient, and privacy-preserving FedFMs in various real-world applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.12176v1-abstract-full').style.display = 'none'; document.getElementById('2502.12176v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.11811">arXiv:2502.11811</a> <span> [<a href="https://arxiv.org/pdf/2502.11811">pdf</a>, <a href="https://arxiv.org/format/2502.11811">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> FineFilter: A Fine-grained Noise Filtering Mechanism for Retrieval-Augmented Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhang%2C+Q">Qianchi Zhang</a>, <a href="/search/?searchtype=author&query=Zhang%2C+H">Hainan Zhang</a>, <a href="/search/?searchtype=author&query=Pang%2C+L">Liang Pang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+H">Hongwei Zheng</a>, <a href="/search/?searchtype=author&query=Tong%2C+Y">Yongxin Tong</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhiming Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2502.11811v2-abstract-short" style="display: inline;"> Retrieved documents containing noise will hinder Retrieval-Augmented Generation (RAG) from detecting answer clues, necessitating noise filtering mechanisms to enhance accuracy. Existing methods use re-ranking or summarization to identify the most relevant sentences, but directly and accurately locating answer clues from these large-scale and complex documents remains challenging. Unlike these docu… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11811v2-abstract-full').style.display = 'inline'; document.getElementById('2502.11811v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.11811v2-abstract-full" style="display: none;"> Retrieved documents containing noise will hinder Retrieval-Augmented Generation (RAG) from detecting answer clues, necessitating noise filtering mechanisms to enhance accuracy. Existing methods use re-ranking or summarization to identify the most relevant sentences, but directly and accurately locating answer clues from these large-scale and complex documents remains challenging. Unlike these document-level operations, we treat noise filtering as a sentence-level MinMax optimization problem: first identifying the potential clues from multiple documents using contextual information, then ranking them by relevance, and finally retaining the least clues through truncation. In this paper, we propose FineFilter, a novel fine-grained noise filtering mechanism for RAG consisting of a clue extractor, a re-ranker, and a truncator. 
We optimize each module to tackle complex reasoning challenges: (1) Clue extractor firstly uses sentences containing the answer and similar ones as fine-tuned targets, aiming at extracting sufficient potential clues; (2) Re-ranker is trained to prioritize effective clues based on the real feedback from generation module, with clues capable of generating correct answer as positive samples and others as negative; (3) Truncator takes the minimum clues needed to answer the question (truncation point) as fine-tuned targets, and performs truncation on the re-ranked clues to achieve fine-grained noise filtering. Experiments on three QA datasets demonstrate that FineFilter significantly outperforms baselines in terms of performance and inference cost. Further analysis on each module shows the effectiveness of our optimizations for complex reasoning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11811v2-abstract-full').style.display = 'none'; document.getElementById('2502.11811v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.11718">arXiv:2502.11718</a> <span> [<a href="https://arxiv.org/pdf/2502.11718">pdf</a>, <a href="https://arxiv.org/format/2502.11718">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ChineseSimpleVQA -- "See the World, Discover Knowledge": A Chinese Factuality Evaluation for Large Vision Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Gu%2C+J">Jihao Gu</a>, <a href="/search/?searchtype=author&query=Wang%2C+Y">Yingyao Wang</a>, <a href="/search/?searchtype=author&query=Bu%2C+P">Pi Bu</a>, <a href="/search/?searchtype=author&query=Wang%2C+C">Chen Wang</a>, <a href="/search/?searchtype=author&query=Wang%2C+Z">Ziming Wang</a>, <a href="/search/?searchtype=author&query=Song%2C+T">Tengtao Song</a>, <a href="/search/?searchtype=author&query=Wei%2C+D">Donglai Wei</a>, <a href="/search/?searchtype=author&query=Yuan%2C+J">Jiale Yuan</a>, <a href="/search/?searchtype=author&query=Zhao%2C+Y">Yingxiu Zhao</a>, <a href="/search/?searchtype=author&query=He%2C+Y">Yancheng He</a>, <a href="/search/?searchtype=author&query=Li%2C+S">Shilong Li</a>, <a href="/search/?searchtype=author&query=Liu%2C+J">Jiaheng Liu</a>, <a href="/search/?searchtype=author&query=Cao%2C+M">Meng Cao</a>, <a href="/search/?searchtype=author&query=Song%2C+J">Jun Song</a>, <a href="/search/?searchtype=author&query=Tan%2C+Y">Yingshui Tan</a>, <a href="/search/?searchtype=author&query=Li%2C+X">Xiang Li</a>, <a href="/search/?searchtype=author&query=Su%2C+W">Wenbo Su</a>, <a 
href="/search/?searchtype=author&query=Zheng%2C+Z">Zhicheng Zheng</a>, <a href="/search/?searchtype=author&query=Zhu%2C+X">Xiaoyong Zhu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+B">Bo Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.11718v2-abstract-short" style="display: inline;"> The evaluation of factual accuracy in large vision language models (LVLMs) has lagged behind their rapid development, making it challenging to fully reflect these models' knowledge capacity and reliability. In this paper, we introduce the first factuality-based visual question-answering benchmark in Chinese, named ChineseSimpleVQA, aimed at assessing the visual factuality of LVLMs across 8 major t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11718v2-abstract-full').style.display = 'inline'; document.getElementById('2502.11718v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.11718v2-abstract-full" style="display: none;"> The evaluation of factual accuracy in large vision language models (LVLMs) has lagged behind their rapid development, making it challenging to fully reflect these models' knowledge capacity and reliability. In this paper, we introduce the first factuality-based visual question-answering benchmark in Chinese, named ChineseSimpleVQA, aimed at assessing the visual factuality of LVLMs across 8 major topics and 56 subtopics. The key features of this benchmark include a focus on the Chinese language, diverse knowledge types, a multi-hop question construction, high-quality data, static consistency, and easy-to-evaluate through short answers. 
Moreover, we contribute a rigorous data construction pipeline and decouple the visual factuality into two parts: seeing the world (i.e., object recognition) and discovering knowledge. This decoupling allows us to analyze the capability boundaries and execution mechanisms of LVLMs. Subsequently, we evaluate 34 advanced open-source and closed-source models, revealing critical performance gaps within this field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11718v2-abstract-full').style.display = 'none'; document.getElementById('2502.11718v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 21 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.11213">arXiv:2502.11213</a> <span> [<a href="https://arxiv.org/pdf/2502.11213">pdf</a>, <a href="https://arxiv.org/format/2502.11213">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Stochastic Optimization of Inventory at Large-scale Supply Chains </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/?searchtype=author&query=Jin%2C+Z+L">Zhaoyang Larry Jin</a>, <a href="/search/?searchtype=author&query=Maasoumy%2C+M">Mehdi Maasoumy</a>, <a href="/search/?searchtype=author&query=Liu%2C+Y">Yimin Liu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zeshi Zheng</a>, <a href="/search/?searchtype=author&query=Ren%2C+Z">Zizhuo Ren</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.11213v1-abstract-short" style="display: inline;"> Today's global supply chains face growing challenges due to rapidly changing market conditions, increased network complexity and inter-dependency, and dynamic uncertainties in supply, demand, and other factors. To combat these challenges, organizations employ Material Requirements Planning (MRP) software solutions to set inventory stock buffers - for raw materials, work-in-process goods, and finis… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11213v1-abstract-full').style.display = 'inline'; document.getElementById('2502.11213v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.11213v1-abstract-full" style="display: none;"> Today's global supply chains face growing challenges due to rapidly changing market conditions, increased network complexity and inter-dependency, and dynamic uncertainties in supply, demand, and other factors. To combat these challenges, organizations employ Material Requirements Planning (MRP) software solutions to set inventory stock buffers - for raw materials, work-in-process goods, and finished products - to help them meet customer service levels. However, holding excess inventory further complicates operations and can lock up millions of dollars of capital that could be otherwise deployed. 
Furthermore, most commercially available MRP solutions fall short in considering uncertainties and do not result in optimal solutions for modern enterprises. At C3 AI, we fundamentally reformulate the inventory management problem as a constrained stochastic optimization. We then propose a simulation-optimization framework that minimizes inventory and related costs while maintaining desired service levels. The framework's goal is to find the optimal reorder parameters that minimize costs subject to a pre-defined service-level constraint and all other real-world operational constraints. These optimal reorder parameters can be fed back into an MRP system to drive optimal order placement, or used to place optimal orders directly. This approach has proven successful in reducing inventory levels by 10-35 percent, resulting in hundreds of millions of dollars of economic benefit for major enterprises at a global scale. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11213v1-abstract-full').style.display = 'none'; document.getElementById('2502.11213v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.10291">arXiv:2502.10291</a> <span> [<a href="https://arxiv.org/pdf/2502.10291">pdf</a>, <a href="https://arxiv.org/format/2502.10291">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Angular analysis of $B^0\rightarrow K^{*0}e^{+}e^{-}$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. 
Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. (1115 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.10291v1-abstract-short" style="display: inline;"> An angular analysis of $B^0\rightarrow K^{*0}e^{+}e^{-}$ decays is presented using proton-proton collision data collected by the LHCb experiment at centre-of-mass energies of 7, 8 and 13 TeV, corresponding to an integrated luminosity of 9 fb$^{-1}$. The analysis is performed in the region of the dilepton invariant mass squared of 1.1-6.0 GeV$^{2}/c^{4}$. In addition, a test of lepton flavour unive… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10291v1-abstract-full').style.display = 'inline'; document.getElementById('2502.10291v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.10291v1-abstract-full" style="display: none;"> An angular analysis of $B^0\rightarrow K^{*0}e^{+}e^{-}$ decays is presented using proton-proton collision data collected by the LHCb experiment at centre-of-mass energies of 7, 8 and 13 TeV, corresponding to an integrated luminosity of 9 fb$^{-1}$. The analysis is performed in the region of the dilepton invariant mass squared of 1.1-6.0 GeV$^{2}/c^{4}$. 
In addition, a test of lepton flavour universality is performed by comparing the obtained angular observables with those measured in $B^0\rightarrow K^{*0}\mu^{+}\mu^{-}$ decays. In general, the angular observables are found to be consistent with the Standard Model expectations as well as with global analyses of other $b \rightarrow s \ell^{+} \ell^{-}$ processes, where $\ell$ is either a muon or an electron. No sign of lepton-flavour-violating effects is observed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.10291v1-abstract-full').style.display = 'none'; document.getElementById('2502.10291v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/1628/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-022, CERN-EP-2025-001 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.09624">arXiv:2502.09624</a> <span> [<a href="https://arxiv.org/pdf/2502.09624">pdf</a>, <a href="https://arxiv.org/format/2502.09624">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and 
Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Efficient and Trustworthy Block Propagation for Blockchain-enabled Mobile Embodied AI Networks: A Graph Resfusion Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Kang%2C+J">Jiawen Kang</a>, <a href="/search/?searchtype=author&query=Liao%2C+J">Jiana Liao</a>, <a href="/search/?searchtype=author&query=Gao%2C+R">Runquan Gao</a>, <a href="/search/?searchtype=author&query=Wen%2C+J">Jinbo Wen</a>, <a href="/search/?searchtype=author&query=Huang%2C+H">Huawei Huang</a>, <a href="/search/?searchtype=author&query=Zhang%2C+M">Maomao Zhang</a>, <a href="/search/?searchtype=author&query=Yi%2C+C">Changyan Yi</a>, <a href="/search/?searchtype=author&query=Zhang%2C+T">Tao Zhang</a>, <a href="/search/?searchtype=author&query=Niyato%2C+D">Dusit Niyato</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zibin Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.09624v1-abstract-short" style="display: inline;"> By synergistically integrating mobile networks and embodied artificial intelligence (AI), Mobile Embodied AI Networks (MEANETs) represent an advanced paradigm that facilitates autonomous, context-aware, and interactive behaviors within dynamic environments. Nevertheless, the rapid development of MEANETs is accompanied by challenges in trustworthiness and operational efficiency. 
Fortunately, blockc… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09624v1-abstract-full').style.display = 'inline'; document.getElementById('2502.09624v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.09624v1-abstract-full" style="display: none;"> By synergistically integrating mobile networks and embodied artificial intelligence (AI), Mobile Embodied AI Networks (MEANETs) represent an advanced paradigm that facilitates autonomous, context-aware, and interactive behaviors within dynamic environments. Nevertheless, the rapid development of MEANETs is accompanied by challenges in trustworthiness and operational efficiency. Fortunately, blockchain technology, with its decentralized and immutable characteristics, offers promising solutions for MEANETs. However, existing block propagation mechanisms suffer from challenges such as low propagation efficiency and weak security for block propagation, which results in delayed transmission of vehicular messages or vulnerability to malicious tampering, potentially causing severe traffic accidents in blockchain-enabled MEANETs. Moreover, current block propagation strategies cannot effectively adapt to real-time changes of dynamic topology in MEANETs. Therefore, in this paper, we propose a graph Resfusion model-based trustworthy block propagation optimization framework for consortium blockchain-enabled MEANETs. Specifically, we propose an innovative trust calculation mechanism based on the trust cloud model, which comprehensively accounts for randomness and fuzziness in the miner trust evaluation. Furthermore, by leveraging the strengths of graph neural networks and diffusion models, we develop a graph Resfusion model to effectively and adaptively generate the optimal block propagation trajectory. 
Simulation results demonstrate that the proposed model outperforms other routing mechanisms in terms of block propagation efficiency and trustworthiness. Additionally, the results highlight its strong adaptability to dynamic environments, making it particularly suitable for rapidly changing MEANETs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09624v1-abstract-full').style.display = 'none'; document.getElementById('2502.09624v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 11 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.09058">arXiv:2502.09058</a> <span> [<a href="https://arxiv.org/pdf/2502.09058">pdf</a>, <a href="https://arxiv.org/format/2502.09058">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3696410.3714758">10.1145/3696410.3714758 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Unleashing the Power of Large Language Model for Denoising Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Wang%2C+S">Shuyao Wang</a>, <a 
href="/search/?searchtype=author&query=Zheng%2C+Z">Zhi Zheng</a>, <a href="/search/?searchtype=author&query=Sui%2C+Y">Yongduo Sui</a>, <a href="/search/?searchtype=author&query=Xiong%2C+H">Hui Xiong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.09058v1-abstract-short" style="display: inline;"> Recommender systems are crucial for personalizing user experiences but often depend on implicit feedback data, which can be noisy and misleading. Existing denoising studies involve incorporating auxiliary information or learning strategies from interaction data. However, they struggle with the inherent limitations of external knowledge and interaction data, as well as the non-universality of certa… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09058v1-abstract-full').style.display = 'inline'; document.getElementById('2502.09058v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.09058v1-abstract-full" style="display: none;"> Recommender systems are crucial for personalizing user experiences but often depend on implicit feedback data, which can be noisy and misleading. Existing denoising studies involve incorporating auxiliary information or learning strategies from interaction data. However, they struggle with the inherent limitations of external knowledge and interaction data, as well as the non-universality of certain predefined assumptions, hindering accurate noise identification. Recently, large language models (LLMs) have gained attention for their extensive world knowledge and reasoning abilities, yet their potential in enhancing denoising in recommendations remains underexplored. 
In this paper, we introduce LLaRD, a framework leveraging LLMs to improve denoising in recommender systems, thereby boosting overall recommendation performance. Specifically, LLaRD generates denoising-related knowledge by first enriching semantic insights from observational data via LLMs and inferring user-item preference knowledge. It then employs a novel Chain-of-Thought (CoT) technique over user-item interaction graphs to reveal relation knowledge for denoising. Finally, it applies the Information Bottleneck (IB) principle to align LLM-generated denoising knowledge with recommendation targets, filtering out noise and irrelevant LLM knowledge. Empirical results demonstrate LLaRD's effectiveness in enhancing denoising and recommendation accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.09058v1-abstract-full').style.display = 'none'; document.getElementById('2502.09058v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 5 figures, 4 tables. 
Accepted by WWW 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08691">arXiv:2502.08691</a> <span> [<a href="https://arxiv.org/pdf/2502.08691">pdf</a>, <a href="https://arxiv.org/format/2502.08691">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> AgentSociety: Large-Scale Simulation of LLM-Driven Generative Agents Advances Understanding of Human Behaviors and Society </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Piao%2C+J">Jinghua Piao</a>, <a href="/search/?searchtype=author&query=Yan%2C+Y">Yuwei Yan</a>, <a href="/search/?searchtype=author&query=Zhang%2C+J">Jun Zhang</a>, <a href="/search/?searchtype=author&query=Li%2C+N">Nian Li</a>, <a href="/search/?searchtype=author&query=Yan%2C+J">Junbo Yan</a>, <a href="/search/?searchtype=author&query=Lan%2C+X">Xiaochong Lan</a>, <a href="/search/?searchtype=author&query=Lu%2C+Z">Zhihong Lu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhiheng Zheng</a>, <a href="/search/?searchtype=author&query=Wang%2C+J+Y">Jing Yi Wang</a>, <a href="/search/?searchtype=author&query=Zhou%2C+D">Di Zhou</a>, <a href="/search/?searchtype=author&query=Gao%2C+C">Chen Gao</a>, <a href="/search/?searchtype=author&query=Xu%2C+F">Fengli Xu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+F">Fang Zhang</a>, <a href="/search/?searchtype=author&query=Rong%2C+K">Ke Rong</a>, <a href="/search/?searchtype=author&query=Su%2C+J">Jun Su</a>, <a href="/search/?searchtype=author&query=Li%2C+Y">Yong Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08691v1-abstract-short" style="display: inline;"> Understanding human behavior and society is a central focus in social sciences, with the rise of generative social science marking a significant paradigmatic shift. By leveraging bottom-up simulations, it replaces costly and logistically challenging traditional experiments with scalable, replicable, and systematic computational approaches for studying complex social dynamics. Recent advances in la… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08691v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08691v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08691v1-abstract-full" style="display: none;"> Understanding human behavior and society is a central focus in social sciences, with the rise of generative social science marking a significant paradigmatic shift. By leveraging bottom-up simulations, it replaces costly and logistically challenging traditional experiments with scalable, replicable, and systematic computational approaches for studying complex social dynamics. Recent advances in large language models (LLMs) have further transformed this research paradigm, enabling the creation of human-like generative social agents and realistic simulacra of society. In this paper, we propose AgentSociety, a large-scale social simulator that integrates LLM-driven agents, a realistic societal environment, and a powerful large-scale simulation engine. Based on the proposed simulator, we generate social lives for over 10k agents, simulating their 5 million interactions both among agents and between agents and their environment. 
Furthermore, we explore the potential of AgentSociety as a testbed for computational social experiments, focusing on four key social issues: polarization, the spread of inflammatory messages, the effects of universal basic income policies, and the impact of external shocks such as hurricanes. These four issues serve as valuable cases for assessing AgentSociety's support for typical research methods -- such as surveys, interviews, and interventions -- as well as for investigating the patterns, causes, and underlying mechanisms of social issues. The alignment between AgentSociety's outcomes and real-world experimental results not only demonstrates its ability to capture human behaviors and their underlying mechanisms, but also underscores its potential as an important platform for social scientists and policymakers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08691v1-abstract-full').style.display = 'none'; document.getElementById('2502.08691v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.08512">arXiv:2502.08512</a> <span> [<a href="https://arxiv.org/pdf/2502.08512">pdf</a>, <a href="https://arxiv.org/format/2502.08512">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Measuring Diversity in Synthetic Datasets </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhu%2C+Y">Yuchang Zhu</a>, <a href="/search/?searchtype=author&query=Zhang%2C+H">Huizhe Zhang</a>, <a href="/search/?searchtype=author&query=Wu%2C+B">Bingzhe Wu</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jintang Li</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zibin Zheng</a>, <a href="/search/?searchtype=author&query=Zhao%2C+P">Peilin Zhao</a>, <a href="/search/?searchtype=author&query=Chen%2C+L">Liang Chen</a>, <a href="/search/?searchtype=author&query=Bian%2C+Y">Yatao Bian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.08512v1-abstract-short" style="display: inline;"> Large language models (LLMs) are widely adopted to generate synthetic datasets for various natural language processing (NLP) tasks, such as text classification and summarization. However, accurately measuring the diversity of these synthetic datasets-an aspect crucial for robust model performance-remains a significant challenge. 
In this paper, we introduce DCScore, a novel method for measuring syn… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08512v1-abstract-full').style.display = 'inline'; document.getElementById('2502.08512v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.08512v1-abstract-full" style="display: none;"> Large language models (LLMs) are widely adopted to generate synthetic datasets for various natural language processing (NLP) tasks, such as text classification and summarization. However, accurately measuring the diversity of these synthetic datasets-an aspect crucial for robust model performance-remains a significant challenge. In this paper, we introduce DCScore, a novel method for measuring synthetic dataset diversity from a classification perspective. Specifically, DCScore formulates diversity evaluation as a sample classification task, leveraging mutual relationships among samples. We further provide theoretical verification of the diversity-related axioms satisfied by DCScore, highlighting its role as a principled diversity evaluation method. Experimental results on synthetic datasets reveal that DCScore enjoys a stronger correlation with multiple diversity pseudo-truths of evaluated datasets, underscoring its effectiveness. Moreover, both empirical and theoretical evidence demonstrate that DCScore substantially reduces computational costs compared to existing approaches. Code is available at: https://github.com/BlueWhaleLab/DCScore. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.08512v1-abstract-full').style.display = 'none'; document.getElementById('2502.08512v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06888">arXiv:2502.06888</a> <span> [<a href="https://arxiv.org/pdf/2502.06888">pdf</a>, <a href="https://arxiv.org/format/2502.06888">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Klotski: Efficient Mixture-of-Expert Inference via Expert-Aware Multi-Batch Pipeline </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Fang%2C+Z">Zhiyuan Fang</a>, <a href="/search/?searchtype=author&query=Huang%2C+Y">Yuegui Huang</a>, <a href="/search/?searchtype=author&query=Hong%2C+Z">Zicong Hong</a>, <a href="/search/?searchtype=author&query=Lyu%2C+Y">Yufeng Lyu</a>, <a href="/search/?searchtype=author&query=Chen%2C+W">Wuhui Chen</a>, <a href="/search/?searchtype=author&query=Yu%2C+Y">Yue Yu</a>, <a href="/search/?searchtype=author&query=Yu%2C+F">Fan Yu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zibin Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06888v1-abstract-short" style="display: inline;"> Mixture of Experts (MoE), 
with its distinctive sparse structure, enables the scaling of language models up to trillions of parameters without significantly increasing computational costs. However, the substantial parameter size presents a challenge for inference, as the expansion in GPU memory cannot keep pace with the growth in parameters. Although offloading techniques utilise memory from the CP… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06888v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06888v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06888v1-abstract-full" style="display: none;"> Mixture of Experts (MoE), with its distinctive sparse structure, enables the scaling of language models up to trillions of parameters without significantly increasing computational costs. However, the substantial parameter size presents a challenge for inference, as the expansion in GPU memory cannot keep pace with the growth in parameters. Although offloading techniques utilise memory from the CPU and disk and parallelise the I/O and computation for efficiency, the computation for each expert in MoE models is often less than the I/O, resulting in numerous bubbles in the pipeline. Therefore, we propose Klotski, an efficient MoE inference engine that significantly reduces pipeline bubbles through a novel expert-aware multi-batch pipeline paradigm. The proposed paradigm uses batch processing to extend the computation time of the current layer to overlap with the loading time of the next layer. Although this idea has been effectively applied to dense models, more batches may activate more experts in the MoE, leading to longer loading times and more bubbles. 
Thus, unlike traditional approaches, we balance computation and I/O time and minimise bubbles by orchestrating their inference orders based on their heterogeneous computation and I/O requirements and activation patterns under different batch numbers. Moreover, to adapt to different hardware environments and models, we design a constraint-sensitive I/O-compute planner and a correlation-aware expert prefetcher for a schedule that minimises pipeline bubbles. Experimental results demonstrate that Klotski achieves a superior throughput-latency trade-off compared to state-of-the-art techniques, with throughput improvements of up to 85.12x. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06888v1-abstract-full').style.display = 'none'; document.getElementById('2502.06888v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06825">arXiv:2502.06825</a> <span> [<a href="https://arxiv.org/pdf/2502.06825">pdf</a>, <a href="https://arxiv.org/format/2502.06825">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> </div> <p class="title is-5 mathjax"> RLOMM: An Efficient and Robust Online Map Matching Framework with Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Chen%2C+M">Minxiao Chen</a>, <a href="/search/?searchtype=author&query=Yuan%2C+H">Haitao Yuan</a>, <a href="/search/?searchtype=author&query=Jiang%2C+N">Nan Jiang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhihan Zheng</a>, <a href="/search/?searchtype=author&query=Wu%2C+S">Sai Wu</a>, <a href="/search/?searchtype=author&query=Zhou%2C+A">Ao Zhou</a>, <a href="/search/?searchtype=author&query=Wang%2C+S">Shangguang Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06825v1-abstract-short" style="display: inline;"> Online map matching is a fundamental problem in location-based services, aiming to incrementally match trajectory data step-by-step onto a road network. However, existing methods fail to meet the needs for efficiency, robustness, and accuracy required by large-scale online applications, making this task still a challenging problem. 
This paper introduces a novel framework that achieves high accurac… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06825v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06825v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06825v1-abstract-full" style="display: none;"> Online map matching is a fundamental problem in location-based services, aiming to incrementally match trajectory data step-by-step onto a road network. However, existing methods fail to meet the needs for efficiency, robustness, and accuracy required by large-scale online applications, making this task still a challenging problem. This paper introduces a novel framework that achieves high accuracy and efficient matching while ensuring robustness in handling diverse scenarios. To improve efficiency, we begin by modeling the online map matching problem as an Online Markov Decision Process (OMDP) based on its inherent characteristics. This approach helps efficiently merge historical and real-time data, reducing unnecessary calculations. Next, to enhance the model's robustness, we design a reinforcement learning method, enabling robust handling of real-time data from dynamically changing environments. In particular, we propose a novel model learning process and a comprehensive reward function, allowing the model to make reasonable current matches from a future-oriented perspective, and to continuously update and optimize during the decision-making process based on feedback. Lastly, to address the heterogeneity between trajectories and roads, we design distinct graph structures, facilitating efficient representation learning through graph and recurrent neural networks. To further align trajectory and road data, we introduce contrastive learning to decrease their distance in the latent space, thereby promoting effective integration of the two. 
Extensive evaluations on three real-world datasets confirm that our method significantly outperforms existing state-of-the-art solutions in terms of accuracy, efficiency and robustness. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06825v1-abstract-full').style.display = 'none'; document.getElementById('2502.06825v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by SIGMOD 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06816">arXiv:2502.06816</a> <span> [<a href="https://arxiv.org/pdf/2502.06816">pdf</a>, <a href="https://arxiv.org/format/2502.06816">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> DeepCell: Multiview Representation Learning for Post-Mapping Netlists </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Shi%2C+Z">Zhengyuan Shi</a>, <a href="/search/?searchtype=author&query=Ma%2C+C">Chengyu Ma</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Ziyang Zheng</a>, <a href="/search/?searchtype=author&query=Zhou%2C+L">Lingfeng Zhou</a>, <a href="/search/?searchtype=author&query=Pan%2C+H">Hongyang Pan</a>, <a href="/search/?searchtype=author&query=Jiang%2C+W">Wentao Jiang</a>, <a 
href="/search/?searchtype=author&query=Yang%2C+F">Fan Yang</a>, <a href="/search/?searchtype=author&query=Yang%2C+X">Xiaoyan Yang</a>, <a href="/search/?searchtype=author&query=Chu%2C+Z">Zhufei Chu</a>, <a href="/search/?searchtype=author&query=Xu%2C+Q">Qiang Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06816v1-abstract-short" style="display: inline;"> Representation learning for post-mapping (PM) netlists is a critical challenge in Electronic Design Automation (EDA), driven by the diverse and complex nature of modern circuit designs. Existing approaches focus on intermediate representations like And-Inverter Graphs (AIGs), limiting their applicability to post-synthesis stages. We introduce DeepCell, a multiview representation learning framework… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06816v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06816v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06816v1-abstract-full" style="display: none;"> Representation learning for post-mapping (PM) netlists is a critical challenge in Electronic Design Automation (EDA), driven by the diverse and complex nature of modern circuit designs. Existing approaches focus on intermediate representations like And-Inverter Graphs (AIGs), limiting their applicability to post-synthesis stages. We introduce DeepCell, a multiview representation learning framework that integrates structural and functional insights from both PM netlists and AIGs to learn rich, generalizable embeddings. At its core, DeepCell employs the novel Mask Circuit Modeling (MCM) mechanism, which refines PM netlist representations in a self-supervised manner using pretrained AIG encoders. 
DeepCell sets a new benchmark in PM netlist representation, outperforming existing methods in predictive accuracy and reconstruction fidelity. To validate its efficacy, we apply DeepCell to functional Engineering Change Orders (ECO), achieving significant reductions in patch generation costs and runtime while improving patch quality. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06816v1-abstract-full').style.display = 'none'; document.getElementById('2502.06816v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.06318">arXiv:2502.06318</a> <span> [<a href="https://arxiv.org/pdf/2502.06318">pdf</a>, <a href="https://arxiv.org/format/2502.06318">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Tracezip: Efficient Distributed Tracing via Trace Compression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Chen%2C+Z">Zhuangbin Chen</a>, <a href="/search/?searchtype=author&query=Pu%2C+J">Junsong Pu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zibin Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.06318v1-abstract-short" style="display: inline;"> Distributed tracing serves as a fundamental building block in the monitoring and testing of cloud service systems. 
To reduce computational and storage overheads, the de facto practice is to capture fewer traces via sampling. However, existing work faces a trade-off between the completeness of tracing and system overhead. On one hand, head-based sampling indiscriminately selects requests to trace w… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06318v1-abstract-full').style.display = 'inline'; document.getElementById('2502.06318v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.06318v1-abstract-full" style="display: none;"> Distributed tracing serves as a fundamental building block in the monitoring and testing of cloud service systems. To reduce computational and storage overheads, the de facto practice is to capture fewer traces via sampling. However, existing work faces a trade-off between the completeness of tracing and system overhead. On one hand, head-based sampling indiscriminately selects requests to trace when they enter the system, which may miss critical events. On the other hand, tail-based sampling traces all requests and selectively persist the edge-case traces, which entails the overheads related to trace collection and ingestion. Taking a different path, in this paper we propose Tracezip to enhance the efficiency of distributed tracing via trace compression. Our key insight is that there exists significant redundancy among traces, which results in repetitive transmission of identical data between the services and backend. We design a new data structure named Span Retrieval Tree (SRT) that continuously encapsulates such redundancy at the service side and transforms trace spans into a lightweight form. At the backend, the full traces can be seamlessly reconstructed by retrieving the common data already delivered by previous spans. 
Tracezip includes a series of strategies to optimize the structure of SRT and a differential update mechanism to efficiently synchronize SRT between services and backend. Our evaluation on microservices benchmarks, popular cloud service systems, and production trace data demonstrate that Tracezip can achieve substantial performance gains in trace collection, with negligible overhead. We have implemented Tracezip inside OpenTelemetry Collector, making it compatible with existing tracing APIs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.06318v1-abstract-full').style.display = 'none'; document.getElementById('2502.06318v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by The 34th ACM SIGSOFT International Symposium on Software Testing and Analysis (ISSTA 2025)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.05602">arXiv:2502.05602</a> <span> [<a href="https://arxiv.org/pdf/2502.05602">pdf</a>, <a href="https://arxiv.org/format/2502.05602">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> UbiMoE: A Ubiquitous Mixture-of-Experts Vision Transformer Accelerator With Hybrid Computation Pattern on FPGA </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Dong%2C+J">Jiale Dong</a>, <a 
href="/search/?searchtype=author&query=Lou%2C+W">Wenqi Lou</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhendong Zheng</a>, <a href="/search/?searchtype=author&query=Qin%2C+Y">Yunji Qin</a>, <a href="/search/?searchtype=author&query=Gong%2C+L">Lei Gong</a>, <a href="/search/?searchtype=author&query=Wang%2C+C">Chao Wang</a>, <a href="/search/?searchtype=author&query=Zhou%2C+X">Xuehai Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.05602v3-abstract-short" style="display: inline;"> Compared to traditional Vision Transformers (ViT), Mixture-of-Experts Vision Transformers (MoE-ViT) are introduced to scale model size without a proportional increase in computational complexity, making them a new research focus. Given the high performance and reconfigurability, FPGA-based accelerators for MoE-ViT emerge, delivering substantial gains over general-purpose processors. However, exist… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05602v3-abstract-full').style.display = 'inline'; document.getElementById('2502.05602v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.05602v3-abstract-full" style="display: none;"> Compared to traditional Vision Transformers (ViT), Mixture-of-Experts Vision Transformers (MoE-ViT) are introduced to scale model size without a proportional increase in computational complexity, making them a new research focus. Given the high performance and reconfigurability, FPGA-based accelerators for MoE-ViT emerge, delivering substantial gains over general-purpose processors. However, existing accelerators often fall short of fully exploring the design space, leading to suboptimal trade-offs between resource utilization and performance. 
To overcome this problem, we introduce UbiMoE, a novel end-to-end FPGA accelerator tailored for MoE-ViT. Leveraging the unique computational and memory access patterns of MoE-ViTs, we develop a latency-optimized streaming attention kernel and a resource-efficient reusable linear kernel, effectively balancing performance and resource consumption. To further enhance design efficiency, we propose a two-stage heuristic search algorithm that optimally tunes hardware parameters for various FPGA resource constraints. Compared to state-of-the-art (SOTA) FPGA designs, UbiMoE achieves 1.34x and 3.35x throughput improvements for MoE-ViT on Xilinx ZCU102 and Alveo U280 platforms, respectively, while enhancing energy efficiency by 1.75x and 1.54x. Our implementation is available at https://github.com/DJ000011/UbiMoE. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05602v3-abstract-full').style.display = 'none'; document.getElementById('2502.05602v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ISCAS 2025 (oral)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.05438">arXiv:2502.05438</a> <span> [<a href="https://arxiv.org/pdf/2502.05438">pdf</a>, <a href="https://arxiv.org/format/2502.05438">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> </div> </div> <p class="title is-5 mathjax"> On the number of edges in saturated partial embeddings of maximal planar graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Bar%C3%A1t%2C+J">János Barát</a>, <a href="/search/?searchtype=author&query=Bl%C3%A1zsik%2C+Z+L">Zoltán L. Blázsik</a>, <a href="/search/?searchtype=author&query=Keszegh%2C+B">Balázs Keszegh</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zeyu Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.05438v1-abstract-short" style="display: inline;"> We investigate the extremal properties of saturated partial plane embeddings of maximal planar graphs. For a planar graph $G$, the plane-saturation number $\mathrm{sat}_{\mathcal{P}}(G)$ denotes the minimum number of edges in a plane subgraph of $G$ such that the addition of any edge either violates planarity or results in a graph that is not a subgraph of $G$. 
We focus on maximal planar graphs an… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05438v1-abstract-full').style.display = 'inline'; document.getElementById('2502.05438v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.05438v1-abstract-full" style="display: none;"> We investigate the extremal properties of saturated partial plane embeddings of maximal planar graphs. For a planar graph $G$, the plane-saturation number $\mathrm{sat}_{\mathcal{P}}(G)$ denotes the minimum number of edges in a plane subgraph of $G$ such that the addition of any edge either violates planarity or results in a graph that is not a subgraph of $G$. We focus on maximal planar graphs and establish an upper bound on $\mathrm{sat}_{\mathcal{P}}(G)$ by showing there exists a universal constant $ε> 0$ such that $\mathrm{sat}_{\mathcal{P}}(G) < (3-ε)v(G)$ for any maximal planar graph $G$ with $v(G) \geq 16$. This answers a question posed by Clifton and Simon. Additionally, we derive lower bound results and demonstrate that for maximal planar graphs with sufficiently large number of vertices, the minimum ratio $\mathrm{sat}_{\mathcal{P}}(G)/e(G)$ lies within the interval $(1/16, 1/9 + o(1)]$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05438v1-abstract-full').style.display = 'none'; document.getElementById('2502.05438v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04537">arXiv:2502.04537</a> <span> [<a href="https://arxiv.org/pdf/2502.04537">pdf</a>, <a href="https://arxiv.org/format/2502.04537">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Multilingual Non-Autoregressive Machine Translation without Knowledge Distillation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Huang%2C+C">Chenyang Huang</a>, <a href="/search/?searchtype=author&query=Huang%2C+F">Fei Huang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zaixiang Zheng</a>, <a href="/search/?searchtype=author&query=Za%C3%AFane%2C+O+R">Osmar R. Zaïane</a>, <a href="/search/?searchtype=author&query=Zhou%2C+H">Hao Zhou</a>, <a href="/search/?searchtype=author&query=Mou%2C+L">Lili Mou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04537v1-abstract-short" style="display: inline;"> Multilingual neural machine translation (MNMT) aims at using one single model for multiple translation directions. Recent work applies non-autoregressive Transformers to improve the efficiency of MNMT, but requires expensive knowledge distillation (KD) processes. To this end, we propose an M-DAT approach to non-autoregressive multilingual machine translation. 
Our system leverages the recent advanc… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04537v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04537v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04537v1-abstract-full" style="display: none;"> Multilingual neural machine translation (MNMT) aims at using one single model for multiple translation directions. Recent work applies non-autoregressive Transformers to improve the efficiency of MNMT, but requires expensive knowledge distillation (KD) processes. To this end, we propose an M-DAT approach to non-autoregressive multilingual machine translation. Our system leverages the recent advance of the directed acyclic Transformer (DAT), which does not require KD. We further propose a pivot back-translation (PivotBT) approach to improve the generalization to unseen translation directions. Experiments show that our M-DAT achieves state-of-the-art performance in non-autoregressive MNMT. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04537v1-abstract-full').style.display = 'none'; document.getElementById('2502.04537v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">In Findings of the Association for Computational Linguistics: IJCNLP-AACL 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.04013">arXiv:2502.04013</a> <span> [<a href="https://arxiv.org/pdf/2502.04013">pdf</a>, <a href="https://arxiv.org/format/2502.04013">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Search for resonance-enhanced $CP$ and angular asymmetries in the $Λ^+_{c}\to pμ^+μ^-$ decay at LHCb </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudinén</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. 
Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. (1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.04013v1-abstract-short" style="display: inline;"> The first measurement of the $CP$ asymmetry of the decay rate ($A_{CP}$) and the $CP$ average ($ΣA_{\text{FB}}$) and $CP$ asymmetry ($ΔA_{\text{FB}}$) of the forward-backward asymmetry in the muon system of $\mathitΛ^+_c\to pμ^+μ^-$ decays is reported. 
The measurement is performed using a data sample of proton-proton collisions, recorded by the LHCb experiment from 2016 to 2018 at a center-of-mass… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04013v1-abstract-full').style.display = 'inline'; document.getElementById('2502.04013v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.04013v1-abstract-full" style="display: none;"> The first measurement of the $CP$ asymmetry of the decay rate ($A_{CP}$) and the $CP$ average ($ΣA_{\text{FB}}$) and $CP$ asymmetry ($ΔA_{\text{FB}}$) of the forward-backward asymmetry in the muon system of $\mathitΛ^+_c\to pμ^+μ^-$ decays is reported. The measurement is performed using a data sample of proton-proton collisions, recorded by the LHCb experiment from 2016 to 2018 at a center-of-mass energy of 13$\text{ TeV}$, which corresponds to an integrated luminosity of 5.4$\text{ fb}^{-1}$. The asymmetries are measured in two regions of dimuon mass near the $φ$-meson mass peak. The dimuon-mass integrated results are \begin{align*} A_{CP} &= (-1.1 \pm 4.0 \pm 0.5)\%,\\ ΣA_{\text{FB}} &= (\phantom{-}3.9 \pm 4.0 \pm 0.6)\%,\\ ΔA_{\text{FB}} &= (\phantom{-}3.1 \pm 4.0 \pm 0.4)\%, \end{align*} where the first uncertainty is statistical and the second systematic. The results are consistent with the conservation of $CP$ symmetry and the Standard Model expectations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.04013v1-abstract-full').style.display = 'none'; document.getElementById('2502.04013v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3473/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-051, CERN-EP-2024-340 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03798">arXiv:2502.03798</a> <span> [<a href="https://arxiv.org/pdf/2502.03798">pdf</a>, <a href="https://arxiv.org/format/2502.03798">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Network-Wide Traffic Flow Estimation Across Multiple Cities with Global Open Multi-Source Data: A Large-Scale Case Study in Europe and North America </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Hu%2C+Z">Zijian Hu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhenjie Zheng</a>, <a href="/search/?searchtype=author&query=Menendez%2C+M">Monica Menendez</a>, <a href="/search/?searchtype=author&query=Ma%2C+W">Wei Ma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03798v1-abstract-short" style="display: inline;"> Network-wide traffic flow, which captures dynamic traffic volume on each link of a general network, is fundamental to smart mobility applications. 
However, the observed traffic flow from sensors is usually limited across the entire network due to the associated high installation and maintenance costs. To address this issue, existing research uses various supplementary data sources to compensate fo… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03798v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03798v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03798v1-abstract-full" style="display: none;"> Network-wide traffic flow, which captures dynamic traffic volume on each link of a general network, is fundamental to smart mobility applications. However, the observed traffic flow from sensors is usually limited across the entire network due to the associated high installation and maintenance costs. To address this issue, existing research uses various supplementary data sources to compensate for insufficient sensor coverage and estimate the unobserved traffic flow. Although these studies have shown promising results, the inconsistent availability and quality of supplementary data across cities make their methods typically face a trade-off challenge between accuracy and generality. In this research, we first time advocate using the Global Open Multi-Source (GOMS) data within an advanced deep learning framework to break the trade-off. The GOMS data primarily encompass geographical and demographic information, including road topology, building footprints, and population density, which can be consistently collected across cities. More importantly, these GOMS data are either causes or consequences of transportation activities, thereby creating opportunities for accurate network-wide flow estimation. Furthermore, we use map images to represent GOMS data, instead of traditional tabular formats, to capture richer and more comprehensive geographical and demographic information. 
To address multi-source data fusion, we develop an attention-based graph neural network that effectively extracts and synthesizes information from GOMS maps while simultaneously capturing spatiotemporal traffic dynamics from observed traffic data. A large-scale case study across 15 cities in Europe and North America was conducted. The results demonstrate stable and satisfactory estimation accuracy across these cities, which suggests that the trade-off challenge can be successfully addressed using our approach. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03798v1-abstract-full').style.display = 'none'; document.getElementById('2502.03798v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03796">arXiv:2502.03796</a> <span> [<a href="https://arxiv.org/pdf/2502.03796">pdf</a>, <a href="https://arxiv.org/format/2502.03796">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Exploring Uncore Frequency Scaling for Heterogeneous Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhong Zheng</a>, <a href="/search/?searchtype=author&query=Sultanov%2C+S">Seyfal Sultanov</a>, <a href="/search/?searchtype=author&query=Papka%2C+M+E">Michael E. 
Papka</a>, <a href="/search/?searchtype=author&query=Lan%2C+Z">Zhiling Lan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03796v1-abstract-short" style="display: inline;"> High-performance computing (HPC) systems are essential for scientific discovery and engineering innovation. However, their growing power demands pose significant challenges, particularly as systems scale to the exascale level. Prior uncore frequency tuning studies have primarily focused on conventional HPC workloads running on homogeneous systems. As HPC advances toward heterogeneous computing, in… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03796v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03796v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03796v1-abstract-full" style="display: none;"> High-performance computing (HPC) systems are essential for scientific discovery and engineering innovation. However, their growing power demands pose significant challenges, particularly as systems scale to the exascale level. Prior uncore frequency tuning studies have primarily focused on conventional HPC workloads running on homogeneous systems. As HPC advances toward heterogeneous computing, integrating diverse GPU workloads on heterogeneous CPU-GPU systems, it is crucial to revisit and enhance uncore scaling. Our investigation reveals that uncore frequency scales down only when CPU power approaches its TDP (Thermal Design Power), an uncommon scenario in GPU-dominant applications, resulting in unnecessary power waste in modern heterogeneous computing systems. To address this, we present MAGUS, a user-transparent uncore frequency scaling runtime for heterogeneous computing. 
Effective uncore tuning is inherently complex, requiring dynamic detection of application execution phases that affect uncore utilization. Moreover, any robust strategy must work across a diverse range of applications, each with unique behaviors and resource requirements. Finally, an efficient runtime should introduce minimal overhead. We incorporate several key techniques in the design of MAGUS, including monitoring and predicting memory throughput, managing frequent phase transitions, and leveraging vendor-supplied power management support. We evaluate MAGUS using a diverse set of GPU benchmarks and applications across multiple heterogeneous systems with different CPU and GPU architectures. The experimental results show that MAGUS achieves up to 27% energy savings and 26% energy-delay product (EDP) reduction compared to the default settings while maintaining a performance loss below 5% and an overhead under 1%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03796v1-abstract-full').style.display = 'none'; document.getElementById('2502.03796v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03649">arXiv:2502.03649</a> <span> [<a href="https://arxiv.org/pdf/2502.03649">pdf</a>, <a href="https://arxiv.org/format/2502.03649">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> All-in-One Image Compression and Restoration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zeng%2C+H">Huimin Zeng</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jiacheng Li</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Ziqiang Zheng</a>, <a href="/search/?searchtype=author&query=Xiong%2C+Z">Zhiwei Xiong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03649v1-abstract-short" style="display: inline;"> Visual images corrupted by various types and levels of degradations are commonly encountered in practical image compression. However, most existing image compression methods are tailored for clean images, therefore struggling to achieve satisfying results on these images. Joint compression and restoration methods typically focus on a single type of degradation and fail to address a variety of degr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03649v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03649v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03649v1-abstract-full" style="display: none;"> Visual images corrupted by various types and levels of degradations are commonly encountered in practical image compression. 
However, most existing image compression methods are tailored for clean images, therefore struggling to achieve satisfying results on these images. Joint compression and restoration methods typically focus on a single type of degradation and fail to address a variety of degradations in practice. To this end, we propose a unified framework for all-in-one image compression and restoration, which incorporates the image restoration capability against various degradations into the process of image compression. The key challenges involve distinguishing authentic image content from degradations, and flexibly eliminating various degradations without prior knowledge. Specifically, the proposed framework approaches these challenges from two perspectives: i.e., content information aggregation, and degradation representation aggregation. Extensive experiments demonstrate the following merits of our model: 1) superior rate-distortion (RD) performance on various degraded inputs while preserving the performance on clean data; 2) strong generalization ability to real-world and unseen scenarios; 3) higher computing efficiency over compared methods. Our code is available at https://github.com/ZeldaM1/All-in-one. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03649v1-abstract-full').style.display = 'none'; document.getElementById('2502.03649v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to WACV 2025 (oral)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03264">arXiv:2502.03264</a> <span> [<a href="https://arxiv.org/pdf/2502.03264">pdf</a>, <a href="https://arxiv.org/format/2502.03264">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> General Time-series Model for Universal Knowledge Representation of Multivariate Time-Series data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=He%2C+C">Cheng He</a>, <a href="/search/?searchtype=author&query=Huang%2C+X">Xu Huang</a>, <a href="/search/?searchtype=author&query=Jiang%2C+G">Gangwei Jiang</a>, <a href="/search/?searchtype=author&query=Li%2C+Z">Zhaoyi Li</a>, <a href="/search/?searchtype=author&query=Lian%2C+D">Defu Lian</a>, <a href="/search/?searchtype=author&query=Xie%2C+H">Hong Xie</a>, <a href="/search/?searchtype=author&query=Chen%2C+E">Enhong Chen</a>, <a href="/search/?searchtype=author&query=Liang%2C+X">Xijie Liang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zengrong Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03264v1-abstract-short" style="display: inline;"> Universal knowledge representation is a central problem for multivariate time series(MTS) foundation models and yet remains open. This paper investigates this problem from the first principle and it makes four folds of contributions. 
First, a new empirical finding is revealed: time series with different time granularities (or corresponding frequency resolutions) exhibit distinct joint distribution… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03264v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03264v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03264v1-abstract-full" style="display: none;"> Universal knowledge representation is a central problem for multivariate time series(MTS) foundation models and yet remains open. This paper investigates this problem from the first principle and it makes four folds of contributions. First, a new empirical finding is revealed: time series with different time granularities (or corresponding frequency resolutions) exhibit distinct joint distributions in the frequency domain. This implies a crucial aspect of learning universal knowledge, one that has been overlooked by previous studies. Second, a novel Fourier knowledge attention mechanism is proposed to enable learning time granularity-aware representations from both the temporal and frequency domains. Third, an autoregressive blank infilling pre-training framework is incorporated to time series analysis for the first time, leading to a generative tasks agnostic pre-training strategy. To this end, we develop the General Time-series Model (GTM), a unified MTS foundation model that addresses the limitation of contemporary time series models, which often require token, pre-training, or model-level customizations for downstream tasks adaption. Fourth, extensive experiments show that GTM outperforms state-of-the-art (SOTA) methods across all generative tasks, including long-term forecasting, anomaly detection, and imputation. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03264v1-abstract-full').style.display = 'none'; document.getElementById('2502.03264v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.03110">arXiv:2502.03110</a> <span> [<a href="https://arxiv.org/pdf/2502.03110">pdf</a>, <a href="https://arxiv.org/format/2502.03110">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Dual-Polarized Intelligent Omni-Surfaces for Independent Reflective-Refractive Transmission </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zizhou Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.03110v1-abstract-short" style="display: inline;"> Intelligent omni-surface (IOS), which are capable of providing service coverage to mobile users (MUs) in a reflective and a refractive manner, has recently attracted widespread attention. However, the performance of traditionally IOS-aid systems is limited by the intimate coupling between the refraction and reflection behavior of IOS elements. 
In this letter, to overcome this challenge, we introdu… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03110v1-abstract-full').style.display = 'inline'; document.getElementById('2502.03110v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.03110v1-abstract-full" style="display: none;"> Intelligent omni-surface (IOS), which are capable of providing service coverage to mobile users (MUs) in a reflective and a refractive manner, has recently attracted widespread attention. However, the performance of traditionally IOS-aid systems is limited by the intimate coupling between the refraction and reflection behavior of IOS elements. In this letter, to overcome this challenge, we introduce the concept of dual-polarized IOS-assisted communication. More precisely, by employing the polarization domain in the design of IOS, full independent refraction and reflection modes can be delivered. We consider a downlink dual-polarized IOS-aided system, while also accounting for the leakage between different polarizations. To maximize the sum rate, we formulate a joint IOS phase shift and BS beamforming problem and proposed an iterative algorithm to solve the non-convex program. Simulation results validate that dual-polarized IOS significantly enhances the performance than the traditional one. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.03110v1-abstract-full').style.display = 'none'; document.getElementById('2502.03110v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.02859">arXiv:2502.02859</a> <span> [<a href="https://arxiv.org/pdf/2502.02859">pdf</a>, <a href="https://arxiv.org/format/2502.02859">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Gap-Dependent Bounds for Federated $Q$-learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhang%2C+H">Haochen Zhang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhong Zheng</a>, <a href="/search/?searchtype=author&query=Xue%2C+L">Lingzhou Xue</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.02859v1-abstract-short" style="display: inline;"> We present the first gap-dependent analysis of regret and communication cost for on-policy federated $Q$-Learning in tabular episodic finite-horizon Markov decision processes (MDPs). 
Existing FRL methods focus on worst-case scenarios, leading to $\sqrt{T}$-type regret bounds and communication cost bounds with a $\log T$ term scaling with the number of agents $M$, states $S$, and actions $A$, where… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.02859v1-abstract-full').style.display = 'inline'; document.getElementById('2502.02859v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.02859v1-abstract-full" style="display: none;"> We present the first gap-dependent analysis of regret and communication cost for on-policy federated $Q$-Learning in tabular episodic finite-horizon Markov decision processes (MDPs). Existing FRL methods focus on worst-case scenarios, leading to $\sqrt{T}$-type regret bounds and communication cost bounds with a $\log T$ term scaling with the number of agents $M$, states $S$, and actions $A$, where $T$ is the average total number of steps per agent. In contrast, our novel framework leverages the benign structures of MDPs, such as a strictly positive suboptimality gap, to achieve a $\log T$-type regret bound and a refined communication cost bound that disentangles exploration and exploitation. Our gap-dependent regret bound reveals a distinct multi-agent speedup pattern, and our gap-dependent communication cost bound removes the dependence on $MSA$ from the $\log T$ term. Notably, our gap-dependent communication cost bound also yields a better global switching cost when $M=1$, removing $SA$ from the $\log T$ term. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.02859v1-abstract-full').style.display = 'none'; document.getElementById('2502.02859v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.02341">arXiv:2502.02341</a> <span> [<a href="https://arxiv.org/pdf/2502.02341">pdf</a>, <a href="https://arxiv.org/format/2502.02341">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Test Time Training for 4D Medical Image Interpolation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhang%2C+Q">Qikang Zhang</a>, <a href="/search/?searchtype=author&query=Lei%2C+Y">Yingjie Lei</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zihao Zheng</a>, <a href="/search/?searchtype=author&query=Chen%2C+Z">Ziyang Chen</a>, <a href="/search/?searchtype=author&query=Xie%2C+Z">Zhonghao Xie</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.02341v1-abstract-short" style="display: inline;"> 4D medical image interpolation is essential for improving temporal resolution and diagnostic precision in clinical 
applications. Previous works ignore the problem of distribution shifts, resulting in poor generalization under different distribution. A natural solution would be to adapt the model to a new test distribution, but this cannot be done if the test input comes without a ground truth labe… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.02341v1-abstract-full').style.display = 'inline'; document.getElementById('2502.02341v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.02341v1-abstract-full" style="display: none;"> 4D medical image interpolation is essential for improving temporal resolution and diagnostic precision in clinical applications. Previous works ignore the problem of distribution shifts, resulting in poor generalization under different distribution. A natural solution would be to adapt the model to a new test distribution, but this cannot be done if the test input comes without a ground truth label. In this paper, we propose a novel test time training framework which uses self-supervision to adapt the model to a new distribution without requiring any labels. Indeed, before performing frame interpolation on each test video, the model is trained on the same instance using a self-supervised task, such as rotation prediction or image reconstruction. We conduct experiments on two publicly available 4D medical image interpolation datasets, Cardiac and 4D-Lung. The experimental results show that the proposed method achieves significant performance across various evaluation metrics on both datasets. It achieves higher peak signal-to-noise ratio values, 33.73dB on Cardiac and 34.02dB on 4D-Lung. Our method not only advances 4D medical image interpolation but also provides a template for domain adaptation in other fields such as image segmentation and image registration. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.02341v1-abstract-full').style.display = 'none'; document.getElementById('2502.02341v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.01681">arXiv:2502.01681</a> <span> [<a href="https://arxiv.org/pdf/2502.01681">pdf</a>, <a href="https://arxiv.org/format/2502.01681">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> DeepGate4: Efficient and Effective Representation Learning for Circuit Design at Scale </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zheng%2C+Z">Ziyang Zheng</a>, <a href="/search/?searchtype=author&query=Huang%2C+S">Shan Huang</a>, <a href="/search/?searchtype=author&query=Zhong%2C+J">Jianyuan Zhong</a>, <a href="/search/?searchtype=author&query=Shi%2C+Z">Zhengyuan Shi</a>, <a href="/search/?searchtype=author&query=Dai%2C+G">Guohao Dai</a>, <a href="/search/?searchtype=author&query=Xu%2C+N">Ningyi Xu</a>, <a href="/search/?searchtype=author&query=Xu%2C+Q">Qiang Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.01681v2-abstract-short" style="display: inline;"> Circuit representation learning has become pivotal in electronic design automation, 
enabling critical tasks such as testability analysis, logic reasoning, power estimation, and SAT solving. However, existing models face significant challenges in scaling to large circuits due to limitations like over-squashing in graph neural networks and the quadratic complexity of transformer-based models. To add… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.01681v2-abstract-full').style.display = 'inline'; document.getElementById('2502.01681v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.01681v2-abstract-full" style="display: none;"> Circuit representation learning has become pivotal in electronic design automation, enabling critical tasks such as testability analysis, logic reasoning, power estimation, and SAT solving. However, existing models face significant challenges in scaling to large circuits due to limitations like over-squashing in graph neural networks and the quadratic complexity of transformer-based models. To address these issues, we introduce DeepGate4, a scalable and efficient graph transformer specifically designed for large-scale circuits. DeepGate4 incorporates several key innovations: (1) an update strategy tailored for circuit graphs, which reduce memory complexity to sub-linear and is adaptable to any graph transformer; (2) a GAT-based sparse transformer with global and local structural encodings for AIGs; and (3) an inference acceleration CUDA kernel that fully exploit the unique sparsity patterns of AIGs. Our extensive experiments on the ITC99 and EPFL benchmarks show that DeepGate4 significantly surpasses state-of-the-art methods, achieving 15.5% and 31.1% performance improvements over the next-best models. Furthermore, the Fused-DeepGate4 variant reduces runtime by 35.1% and memory usage by 46.8%, making it highly efficient for large-scale circuit analysis. 
These results demonstrate the potential of DeepGate4 to handle complex EDA tasks while offering superior scalability and efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.01681v2-abstract-full').style.display = 'none'; document.getElementById('2502.01681v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.01061">arXiv:2502.01061</a> <span> [<a href="https://arxiv.org/pdf/2502.01061">pdf</a>, <a href="https://arxiv.org/format/2502.01061">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> OmniHuman-1: Rethinking the Scaling-Up of One-Stage Conditioned Human Animation Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Lin%2C+G">Gaojie Lin</a>, <a href="/search/?searchtype=author&query=Jiang%2C+J">Jianwen Jiang</a>, <a href="/search/?searchtype=author&query=Yang%2C+J">Jiaqi Yang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zerong Zheng</a>, <a href="/search/?searchtype=author&query=Liang%2C+C">Chao Liang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.01061v2-abstract-short" style="display: inline;"> End-to-end human animation, such as audio-driven talking human generation, has 
undergone notable advancements in the recent few years. However, existing methods still struggle to scale up as large general video generation models, limiting their potential in real applications. In this paper, we propose OmniHuman, a Diffusion Transformer-based framework that scales up data by mixing motion-related c… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.01061v2-abstract-full').style.display = 'inline'; document.getElementById('2502.01061v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.01061v2-abstract-full" style="display: none;"> End-to-end human animation, such as audio-driven talking human generation, has undergone notable advancements in the recent few years. However, existing methods still struggle to scale up as large general video generation models, limiting their potential in real applications. In this paper, we propose OmniHuman, a Diffusion Transformer-based framework that scales up data by mixing motion-related conditions into the training phase. To this end, we introduce two training principles for these mixed conditions, along with the corresponding model architecture and inference strategy. These designs enable OmniHuman to fully leverage data-driven motion generation, ultimately achieving highly realistic human video generation. More importantly, OmniHuman supports various portrait contents (face close-up, portrait, half-body, full-body), supports both talking and singing, handles human-object interactions and challenging body poses, and accommodates different image styles. Compared to existing end-to-end audio-driven methods, OmniHuman not only produces more realistic videos, but also offers greater flexibility in inputs. It also supports multiple driving modalities (audio-driven, video-driven and combined driving signals). 
Video samples are provided on the project page (https://omnihuman-lab.github.io) <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.01061v2-abstract-full').style.display = 'none'; document.getElementById('2502.01061v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">https://omnihuman-lab.github.io/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.00264">arXiv:2502.00264</a> <span> [<a href="https://arxiv.org/pdf/2502.00264">pdf</a>, <a href="https://arxiv.org/format/2502.00264">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Beyond the Permutation Symmetry of Transformers: The Role of Rotation for Model Fusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhang%2C+B">Binchi Zhang</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zaiyi Zheng</a>, <a href="/search/?searchtype=author&query=Chen%2C+Z">Zhengzhang Chen</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jundong Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span
class="abstract-short has-text-grey-dark mathjax" id="2502.00264v1-abstract-short" style="display: inline;"> Symmetry in the parameter space of deep neural networks (DNNs) has proven beneficial for various deep learning applications. A well-known example is the permutation symmetry in Multi-Layer Perceptrons (MLPs), where permuting the rows of weight matrices in one layer and applying the inverse permutation to adjacent layers yields a functionally equivalent model. While permutation symmetry fully chara… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.00264v1-abstract-full').style.display = 'inline'; document.getElementById('2502.00264v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.00264v1-abstract-full" style="display: none;"> Symmetry in the parameter space of deep neural networks (DNNs) has proven beneficial for various deep learning applications. A well-known example is the permutation symmetry in Multi-Layer Perceptrons (MLPs), where permuting the rows of weight matrices in one layer and applying the inverse permutation to adjacent layers yields a functionally equivalent model. While permutation symmetry fully characterizes the equivalence set for MLPs, its discrete nature limits its utility for transformers. In this paper, we introduce rotation symmetry, a novel form of parameter space symmetry for transformers that generalizes permutation symmetry by rotating parameter matrices in self-attention layers. Unlike permutation symmetry, rotation symmetry operates in a continuous domain, thereby significantly expanding the equivalence set for transformers. Based on this property, we propose a theoretically optimal parameter matching algorithm as a plug-and-play module to enhance model fusion. We evaluate our approach using pre-trained transformers across diverse natural language and vision tasks. 
Experimental results demonstrate that our rotation symmetry-based matching algorithm substantially improves model fusion, highlighting the potential of parameter space symmetry to facilitate model fusion. Our code is available on https://github.com/zhengzaiyi/RotationSymmetry. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.00264v1-abstract-full').style.display = 'none'; document.getElementById('2502.00264v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.00158">arXiv:2502.00158</a> <span> [<a href="https://arxiv.org/pdf/2502.00158">pdf</a>, <a href="https://arxiv.org/format/2502.00158">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Resolving Editing-Unlearning Conflicts: A Knowledge Codebook Framework for Large Language Model Updating </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhang%2C+B">Binchi Zhang</a>, <a href="/search/?searchtype=author&query=Chen%2C+Z">Zhengzhang Chen</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zaiyi Zheng</a>, <a href="/search/?searchtype=author&query=Li%2C+J">Jundong Li</a>, <a href="/search/?searchtype=author&query=Chen%2C+H">Haifeng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.00158v1-abstract-short" style="display: inline;"> Large Language Models 
(LLMs) excel in natural language processing by encoding extensive human knowledge, but their utility relies on timely updates as knowledge evolves. Updating LLMs involves two key tasks simultaneously: unlearning to remove unwanted knowledge and editing to incorporate new information. Existing methods face two major challenges: ineffective knowledge storage (either too sparse… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.00158v1-abstract-full').style.display = 'inline'; document.getElementById('2502.00158v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.00158v1-abstract-full" style="display: none;"> Large Language Models (LLMs) excel in natural language processing by encoding extensive human knowledge, but their utility relies on timely updates as knowledge evolves. Updating LLMs involves two key tasks simultaneously: unlearning to remove unwanted knowledge and editing to incorporate new information. Existing methods face two major challenges: ineffective knowledge storage (either too sparse or too dense) and task conflicts between editing and unlearning, as validated through our theoretical and experimental results. To address these issues, we propose LOKA, a conflict-free framework for LLM updating based on a knowledge codebook. During training, updated knowledge is stored in multiple codebook memories. To optimize knowledge storage, a similarity-aware knowledge mapping ensures that related knowledge pieces are clustered and allocated to the same memory. Additionally, LOKA resolves task conflicts by employing task-specific and multi-task memories guided by a conflict score. In the inference stage, LOKA retrieves the most relevant memory from the codebook and plugs it into the original LLM to apply the updated knowledge. A learning-based router controls codebook activation to further improve knowledge utilization. 
Extensive experiments demonstrate the effectiveness of LOKA in LLM knowledge updating tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.00158v1-abstract-full').style.display = 'none'; document.getElementById('2502.00158v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.17992">arXiv:2501.17992</a> <span> [<a href="https://arxiv.org/pdf/2501.17992">pdf</a>, <a href="https://arxiv.org/format/2501.17992">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Portfolio Management">q-fin.PM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Reinforcement-Learning Portfolio Allocation with Dynamic Embedding of Market Information </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=He%2C+J">Jinghai He</a>, <a href="/search/?searchtype=author&query=Hua%2C+C">Cheng Hua</a>, <a href="/search/?searchtype=author&query=Zhou%2C+C">Chunyang Zhou</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zeyu Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.17992v1-abstract-short" style="display: inline;"> We develop a portfolio allocation framework that leverages deep learning techniques to address challenges arising from high-dimensional, non-stationary, and low-signal-to-noise market information. 
Our approach includes a dynamic embedding method that reduces the non-stationary, high-dimensional state space into a lower-dimensional representation. We design a reinforcement learning (RL) framework t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.17992v1-abstract-full').style.display = 'inline'; document.getElementById('2501.17992v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.17992v1-abstract-full" style="display: none;"> We develop a portfolio allocation framework that leverages deep learning techniques to address challenges arising from high-dimensional, non-stationary, and low-signal-to-noise market information. Our approach includes a dynamic embedding method that reduces the non-stationary, high-dimensional state space into a lower-dimensional representation. We design a reinforcement learning (RL) framework that integrates generative autoencoders and online meta-learning to dynamically embed market information, enabling the RL agent to focus on the most impactful parts of the state space for portfolio allocation decisions. Empirical analysis based on the top 500 U.S. stocks demonstrates that our framework outperforms common portfolio benchmarks and the predict-then-optimize (PTO) approach using machine learning, particularly during periods of market stress. Traditional factor models do not fully explain this superior performance. The framework's ability to time volatility reduces its market exposure during turbulent times. Ablation studies confirm the robustness of this performance across various reinforcement learning algorithms. Additionally, the embedding and meta-learning techniques effectively manage the complexities of high-dimensional, noisy, and non-stationary financial data, enhancing both portfolio performance and risk management. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.17992v1-abstract-full').style.display = 'none'; document.getElementById('2501.17992v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.17555">arXiv:2501.17555</a> <span> [<a href="https://arxiv.org/pdf/2501.17555">pdf</a>, <a href="https://arxiv.org/format/2501.17555">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> An Exceptional Dataset For Rare Pancreatic Tumor Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Li%2C+W">Wenqi Li</a>, <a href="/search/?searchtype=author&query=Chen%2C+Y">Yingli Chen</a>, <a href="/search/?searchtype=author&query=Zhou%2C+K">Keyang Zhou</a>, <a href="/search/?searchtype=author&query=Hu%2C+X">Xiaoxiao Hu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zilu Zheng</a>, <a href="/search/?searchtype=author&query=Yan%2C+Y">Yue Yan</a>, <a href="/search/?searchtype=author&query=Zhang%2C+X">Xinpeng Zhang</a>, <a href="/search/?searchtype=author&query=Tang%2C+W">Wei Tang</a>, <a href="/search/?searchtype=author&query=Qian%2C+Z">Zhenxing Qian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2501.17555v1-abstract-short" style="display: inline;"> Pancreatic NEuroendocrine Tumors (pNETs) are very rare endocrine neoplasms that account for less than 5% of all pancreatic malignancies, with an incidence of only 1-1.5 cases per 100,000. Early detection of pNETs is critical for improving patient survival, but the rarity of pNETs makes segmenting them from CT a very challenging problem. So far, there has not been a dataset specifically for pNETs a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.17555v1-abstract-full').style.display = 'inline'; document.getElementById('2501.17555v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.17555v1-abstract-full" style="display: none;"> Pancreatic NEuroendocrine Tumors (pNETs) are very rare endocrine neoplasms that account for less than 5% of all pancreatic malignancies, with an incidence of only 1-1.5 cases per 100,000. Early detection of pNETs is critical for improving patient survival, but the rarity of pNETs makes segmenting them from CT a very challenging problem. So far, there has not been a dataset specifically for pNETs available to researchers. To address this issue, we propose a pNETs dataset, a well-annotated Contrast-Enhanced Computed Tomography (CECT) dataset focused exclusively on Pancreatic Neuroendocrine Tumors, containing data from 469 patients. This is the first dataset solely dedicated to pNETs, distinguishing it from previous collections. Additionally, we provide the baseline detection networks with a new slice-wise weight loss function designed for the UNet-based model, improving the overall pNET segmentation performance. We hope that our dataset can enhance the understanding and diagnosis of pNET Tumors within the medical community, facilitate the development of more accurate diagnostic tools, and ultimately improve patient outcomes and advance the field of oncology. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.17555v1-abstract-full').style.display = 'none'; document.getElementById('2501.17555v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.17469">arXiv:2501.17469</a> <span> [<a href="https://arxiv.org/pdf/2501.17469">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Mathematical Physics">math-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1103/PhysRevA.111.012624">10.1103/PhysRevA.111.012624 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Detecting quantum steering in networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Li%2C+M">Ming-Xiao Li</a>, <a href="/search/?searchtype=author&query=Li%2C+Y">Yuqi Li</a>, <a href="/search/?searchtype=author&query=Xi%2C+Y">Ya Xi</a>, <a href="/search/?searchtype=author&query=Zhang%2C+C">Chang-Yue Zhang</a>, <a href="/search/?searchtype=author&query=Wang%2C+Y">Ying-Zheng Wang</a>, <a href="/search/?searchtype=author&query=Xu%2C+R">Rui-Bin Xu</a>, <a href="/search/?searchtype=author&query=Fei%2C+S">Shao-Ming Fei</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhu-Jun Zheng</a> </p> 
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.17469v1-abstract-short" style="display: inline;"> Quantum networks promise an unprecedented leap in semi-device-independent communication and security by capitalizing on quantum steering. However, current methods for assessing quantum network steering are constrained to specific cases. In this work, we introduce the network-Clauser-Horn-Shimony-Holt-like inequality for investigating network steering independent of entanglement source characterist… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.17469v1-abstract-full').style.display = 'inline'; document.getElementById('2501.17469v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.17469v1-abstract-full" style="display: none;"> Quantum networks promise an unprecedented leap in semi-device-independent communication and security by capitalizing on quantum steering. However, current methods for assessing quantum network steering are constrained to specific cases. In this work, we introduce the network-Clauser-Horn-Shimony-Holt-like inequality for investigating network steering independent of entanglement source characteristics. We employ this inequality to detect full network steering in both single-node and multinode repeater networks and assess the tolerance of various noise models. Under a specific noise model, our method is used to compute the bound of semi-device-independent communication distance. Through case studies, we also demonstrate that our method, as a semi-device-independent entanglement witness, is more suitable for settings requiring Bell measurements compared to network Bell inequality. These findings open new avenues for broader ways to detect quantum steering independent of entanglement sources. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.17469v1-abstract-full').style.display = 'none'; document.getElementById('2501.17469v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 7 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Physical Review A 111, 012624 (2025) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.16952">arXiv:2501.16952</a> <span> [<a href="https://arxiv.org/pdf/2501.16952">pdf</a>, <a href="https://arxiv.org/format/2501.16952">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Multiple Abstraction Level Retrieve Augment Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zheng Zheng</a>, <a href="/search/?searchtype=author&query=Ni%2C+X">Xinyi Ni</a>, <a href="/search/?searchtype=author&query=Hong%2C+P">Pengyu Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2501.16952v1-abstract-short" style="display: inline;"> A Retrieval-Augmented Generation (RAG) model powered by a large language model (LLM) provides a faster and more cost-effective solution for adapting to new data and knowledge. It also delivers more specialized responses compared to pre-trained LLMs. However, most existing approaches rely on retrieving prefix-sized chunks as references to support question-answering (Q/A). This approach is often dep… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.16952v1-abstract-full').style.display = 'inline'; document.getElementById('2501.16952v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.16952v1-abstract-full" style="display: none;"> A Retrieval-Augmented Generation (RAG) model powered by a large language model (LLM) provides a faster and more cost-effective solution for adapting to new data and knowledge. It also delivers more specialized responses compared to pre-trained LLMs. However, most existing approaches rely on retrieving prefix-sized chunks as references to support question-answering (Q/A). This approach is often deployed to address information needs at a single level of abstraction, as it struggles to generate answers across multiple levels of abstraction. In an RAG setting, while LLMs can summarize and answer questions effectively when provided with sufficient details, retrieving excessive information often leads to the 'lost in the middle' problem and exceeds token limitations. We propose a novel RAG approach that uses chunks of multiple abstraction levels (MAL), including multi-sentence-level, paragraph-level, section-level, and document-level. The effectiveness of our approach is demonstrated in an under-explored scientific domain of Glycoscience. 
Compared to traditional single-level RAG approaches, our approach improves AI evaluated answer correctness of Q/A by 25.739% on Glyco-related papers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.16952v1-abstract-full').style.display = 'none'; document.getElementById('2501.16952v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.16699">arXiv:2501.16699</a> <span> [<a href="https://arxiv.org/pdf/2501.16699">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Mesoscale and Nanoscale Physics">cond-mat.mes-hall</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Strongly Correlated Electrons">cond-mat.str-el</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Superconductivity">cond-mat.supr-con</span> </div> </div> <p class="title is-5 mathjax"> Unconventional Superconducting Phase Diagram of Monolayer WTe2 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Song%2C+T">Tiancheng Song</a>, <a href="/search/?searchtype=author&query=Jia%2C+Y">Yanyu Jia</a>, <a href="/search/?searchtype=author&query=Yu%2C+G">Guo Yu</a>, <a href="/search/?searchtype=author&query=Tang%2C+Y">Yue Tang</a>, <a href="/search/?searchtype=author&query=Uzan%2C+A+J">Ayelet J. 
Uzan</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z+J">Zhaoyi Joy Zheng</a>, <a href="/search/?searchtype=author&query=Guan%2C+H">Haosen Guan</a>, <a href="/search/?searchtype=author&query=Onyszczak%2C+M">Michael Onyszczak</a>, <a href="/search/?searchtype=author&query=Singha%2C+R">Ratnadwip Singha</a>, <a href="/search/?searchtype=author&query=Gui%2C+X">Xin Gui</a>, <a href="/search/?searchtype=author&query=Watanabe%2C+K">Kenji Watanabe</a>, <a href="/search/?searchtype=author&query=Taniguchi%2C+T">Takashi Taniguchi</a>, <a href="/search/?searchtype=author&query=Cava%2C+R+J">Robert J. Cava</a>, <a href="/search/?searchtype=author&query=Schoop%2C+L+M">Leslie M. Schoop</a>, <a href="/search/?searchtype=author&query=Ong%2C+N+P">N. P. Ong</a>, <a href="/search/?searchtype=author&query=Wu%2C+S">Sanfeng Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.16699v1-abstract-short" style="display: inline;"> The existence of a quantum critical point (QCP) and fluctuations around it are believed to be important for understanding the phase diagram in unconventional superconductors such as cuprates, iron pnictides, and heavy fermion superconductors. However, the QCP is usually buried deep within the superconducting dome and is difficult to investigate. 
The connection between quantum critical fluctuations… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.16699v1-abstract-full').style.display = 'inline'; document.getElementById('2501.16699v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.16699v1-abstract-full" style="display: none;"> The existence of a quantum critical point (QCP) and fluctuations around it are believed to be important for understanding the phase diagram in unconventional superconductors such as cuprates, iron pnictides, and heavy fermion superconductors. However, the QCP is usually buried deep within the superconducting dome and is difficult to investigate. The connection between quantum critical fluctuations and superconductivity remains an outstanding problem in condensed matter. Here combining both electrical transport and Nernst experiments, we explicitly demonstrate the onset of superconductivity at an unconventional QCP in gate-tuned monolayer tungsten ditelluride (WTe2), with features incompatible with the conventional Bardeen-Cooper-Schrieffer (BCS) scenario. The results lead to a novel superconducting phase diagram that is distinguished from other known superconductors. Two distinct gate-tuned quantum phase transitions are observed at the ends of the superconducting dome. We find that quantum fluctuations around the QCP of the underdoped regime are essential for understanding how the monolayer superconductivity is established. The unconventional phase diagram we report here illustrates a previously unknown relation between superconductivity and QCP. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.16699v1-abstract-full').style.display = 'none'; document.getElementById('2501.16699v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.15493">arXiv:2501.15493</a> <span> [<a href="https://arxiv.org/pdf/2501.15493">pdf</a>, <a href="https://arxiv.org/format/2501.15493">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> RLER-TTE: An Efficient and Effective Framework for En Route Travel Time Estimation with Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zhihan Zheng</a>, <a href="/search/?searchtype=author&query=Yuan%2C+H">Haitao Yuan</a>, <a href="/search/?searchtype=author&query=Chen%2C+M">Minxiao Chen</a>, <a href="/search/?searchtype=author&query=Wang%2C+S">Shangguang Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.15493v1-abstract-short" style="display: inline;"> En Route Travel Time Estimation (ER-TTE) aims to learn driving patterns from traveled routes to achieve rapid and accurate real-time predictions. However, existing methods ignore the complexity and dynamism of real-world traffic systems, resulting in significant gaps in efficiency and accuracy in real-time scenarios. 
Addressing this issue is a critical yet challenging task. This paper proposes a n… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15493v1-abstract-full').style.display = 'inline'; document.getElementById('2501.15493v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.15493v1-abstract-full" style="display: none;"> En Route Travel Time Estimation (ER-TTE) aims to learn driving patterns from traveled routes to achieve rapid and accurate real-time predictions. However, existing methods ignore the complexity and dynamism of real-world traffic systems, resulting in significant gaps in efficiency and accuracy in real-time scenarios. Addressing this issue is a critical yet challenging task. This paper proposes a novel framework that redefines the implementation path of ER-TTE to achieve highly efficient and effective predictions. Firstly, we introduce a novel pipeline consisting of a Decision Maker and a Predictor to rectify the inefficient prediction strategies of current methods. The Decision Maker performs efficient real-time decisions to determine whether the high-complexity prediction model in the Predictor needs to be invoked, and the Predictor recalculates the travel time or infers from historical prediction results based on these decisions. Next, to tackle the dynamic and uncertain real-time scenarios, we model the online decision-making problem as a Markov decision process and design an intelligent agent based on reinforcement learning for autonomous decision-making. Moreover, to fully exploit the spatio-temporal correlation between online data and offline data, we meticulously design feature representation and encoding techniques based on the attention mechanism. 
Finally, to improve the flawed training and evaluation strategies of existing methods, we propose an end-to-end training and evaluation approach, incorporating curriculum learning strategies to manage spatio-temporal data for more advanced training algorithms. Extensive evaluations on three real-world datasets confirm that our method significantly outperforms state-of-the-art solutions in both accuracy and efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15493v1-abstract-full').style.display = 'none'; document.getElementById('2501.15493v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by SIGMOD 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.15214">arXiv:2501.15214</a> <span> [<a href="https://arxiv.org/pdf/2501.15214">pdf</a>, <a href="https://arxiv.org/format/2501.15214">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Zero-shot Robotic Manipulation with Language-guided Instruction and Formal Task Planning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Tang%2C+J">Junfeng Tang</a>, <a href="/search/?searchtype=author&query=Ye%2C+Z">Zihan Ye</a>, <a href="/search/?searchtype=author&query=Yan%2C+Y">Yuping Yan</a>, <a 
href="/search/?searchtype=author&query=Zheng%2C+Z">Ziqi Zheng</a>, <a href="/search/?searchtype=author&query=Gao%2C+T">Ting Gao</a>, <a href="/search/?searchtype=author&query=Jin%2C+Y">Yaochu Jin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.15214v1-abstract-short" style="display: inline;"> Robotic manipulation is often challenging due to the long-horizon tasks and the complex object relationships. A common solution is to develop a task and motion planning framework that integrates planning for high-level task and low-level motion. Recently, inspired by the powerful reasoning ability of Large Language Models (LLMs), LLM-based planning approaches have achieved remarkable progress. How… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15214v1-abstract-full').style.display = 'inline'; document.getElementById('2501.15214v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.15214v1-abstract-full" style="display: none;"> Robotic manipulation is often challenging due to the long-horizon tasks and the complex object relationships. A common solution is to develop a task and motion planning framework that integrates planning for high-level task and low-level motion. Recently, inspired by the powerful reasoning ability of Large Language Models (LLMs), LLM-based planning approaches have achieved remarkable progress. However, these methods still heavily rely on expert-specific knowledge, often generating invalid plans for unseen and unfamiliar tasks. To address this issue, we propose an innovative language-guided symbolic task planning (LM-SymOpt) framework with optimization. 
It is the first expert-free planning framework since we combine the world knowledge from LLMs with formal reasoning, resulting in improved generalization capability to new tasks. Specifically, differ to most existing work, our LM-SymOpt employs LLMs to translate natural language instructions into symbolic representations, thereby representing actions as high-level symbols and reducing the search space for planning. Next, after evaluating the action probability of completing the task using LLMs, a weighted random sampling method is introduced to generate candidate plans. Their feasibility is assessed through symbolic reasoning and their cost efficiency is then evaluated using trajectory optimization for selecting the optimal planning. Our experimental results show that LM-SymOpt outperforms existing LLM-based planning approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15214v1-abstract-full').style.display = 'none'; document.getElementById('2501.15214v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.15206">arXiv:2501.15206</a> <span> [<a href="https://arxiv.org/pdf/2501.15206">pdf</a>, <a href="https://arxiv.org/ps/2501.15206">ps</a>, <a href="https://arxiv.org/format/2501.15206">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Disordered Systems and Neural Networks">cond-mat.dis-nn</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Engineering-Oriented Design of Drift-Resilient MTJ Random Number Generator via Hybrid Control Strategies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Zhang%2C+R">Ran Zhang</a>, <a href="/search/?searchtype=author&query=Wan%2C+C">Caihua Wan</a>, <a href="/search/?searchtype=author&query=Xu%2C+Y">Yingqian Xu</a>, <a href="/search/?searchtype=author&query=Li%2C+X">Xiaohan Li</a>, <a href="/search/?searchtype=author&query=Hoffmann%2C+R">Raik Hoffmann</a>, <a href="/search/?searchtype=author&query=Hindenberg%2C+M">Meike Hindenberg</a>, <a href="/search/?searchtype=author&query=Liu%2C+S">Shiqiang Liu</a>, <a href="/search/?searchtype=author&query=Kong%2C+D">Dehao Kong</a>, <a href="/search/?searchtype=author&query=Xiong%2C+S">Shilong Xiong</a>, <a href="/search/?searchtype=author&query=He%2C+S">Shikun He</a>, <a href="/search/?searchtype=author&query=Vardar%2C+A">Alptekin Vardar</a>, <a href="/search/?searchtype=author&query=Dai%2C+Q">Qiang Dai</a>, <a href="/search/?searchtype=author&query=Gong%2C+J">Junlu Gong</a>, <a href="/search/?searchtype=author&query=Sun%2C+Y">Yihui Sun</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zejie 
Zheng</a>, <a href="/search/?searchtype=author&query=K%C3%A4mpfe%2C+T">Thomas Kämpfe</a>, <a href="/search/?searchtype=author&query=Yu%2C+G">Guoqiang Yu</a>, <a href="/search/?searchtype=author&query=Han%2C+X">Xiufeng Han</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.15206v1-abstract-short" style="display: inline;"> In the quest for secure and reliable random number generation, Magnetic Tunnel Junctions (MTJs) have emerged as a promising technology due to their unique ability to exploit the stochastic nature of magnetization switching. This paper presents an engineering-oriented design of a drift-resilient MTJ-based True Random Number Generator (TRNG) utilizing a hybrid control strategy. We address the critic… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15206v1-abstract-full').style.display = 'inline'; document.getElementById('2501.15206v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.15206v1-abstract-full" style="display: none;"> In the quest for secure and reliable random number generation, Magnetic Tunnel Junctions (MTJs) have emerged as a promising technology due to their unique ability to exploit the stochastic nature of magnetization switching. This paper presents an engineering-oriented design of a drift-resilient MTJ-based True Random Number Generator (TRNG) utilizing a hybrid control strategy. We address the critical issue of switching probability drift, which can compromise the randomness and bias the output of MTJ-based TRNGs. Our approach combines a self-stabilization strategy, which dynamically adjusts the driving voltage based on real-time feedback, with pulse width modulation to enhance control over the switching probability. 
Through comprehensive experimental and simulation results, we demonstrate significant improvements in the stability, uniformity, and quality of the random numbers generated. The proposed system offers flexibility and adaptability for diverse applications, making it a reliable solution for high-quality randomness in cryptography, secure communications, and beyond. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.15206v1-abstract-full').style.display = 'none'; document.getElementById('2501.15206v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.14943">arXiv:2501.14943</a> <span> [<a href="https://arxiv.org/pdf/2501.14943">pdf</a>, <a href="https://arxiv.org/format/2501.14943">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Evidence for $B^-\rightarrow D^{**0}蟿^-\overline{谓_蟿}$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. 
Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. 
(1127 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.14943v1-abstract-short" style="display: inline;"> The first evidence for the decay $B^-\rightarrow D^{**0}τ^-\overline{ν_τ}$ is obtained using proton-proton collision data collected by the LHCb experiment, corresponding to an integrated luminosity of 9 fb$^{-1}$, at centre-of-mass energies of 7, 8 and 13 TeV. Here, the $D^{**0}$ meson represents any of the three excited charm mesons $D_{1}(2420)^{0}$, $D_{2}^{*}(2460)^{0}$, and… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.14943v1-abstract-full').style.display = 'inline'; document.getElementById('2501.14943v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.14943v1-abstract-full" style="display: none;"> The first evidence for the decay $B^-\rightarrow D^{**0}τ^-\overline{ν_τ}$ is obtained using proton-proton collision data collected by the LHCb experiment, corresponding to an integrated luminosity of 9 fb$^{-1}$, at centre-of-mass energies of 7, 8 and 13 TeV. Here, the $D^{**0}$ meson represents any of the three excited charm mesons $D_{1}(2420)^{0}$, $D_{2}^{*}(2460)^{0}$, and $D_{1}^{'}(2400)^{0}$. The $B^-\rightarrow D^{**0}τ^-\overline{ν_τ}$ signal is measured with a significance of 3.5 $σ$, including systematic uncertainties. The combined branching fraction $BR(B^-\rightarrow D^{**0}_{1,2}τ^-\overline{ν_τ})\times BR(D^{**0}_{1,2}\rightarrow D^{*+}π^-)$, where $D^{**0}_{1,2}$ denotes both $D_{1}(2420)^{0}$ and $D_{2}^{*}(2460)^{0}$ contributions, is measured to be $(0.051\pm0.013(stat)\pm 0.006(syst)\pm 0.009(\rm{ext}) )\%$, where the last uncertainty reflects that of the branching fraction of the normalisation channel $B^-\rightarrow D^{**0}_{1,2}D_s^{(*)-}$. 
The ratio between the tauonic and muonic semileptonic $B$ decays, with the latter taken from world average values, is also determined and found to be ${\cal R}(D^{**0}_{1,2})=0.13\pm0.03(stat)\pm0.01(syst)\pm0.02\,(\rm{ext})$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.14943v1-abstract-full').style.display = 'none'; document.getElementById('2501.14943v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3300/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> LHCb-PAPER-2024-037, CERN-EP-2024-341 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.12779">arXiv:2501.12779</a> <span> [<a href="https://arxiv.org/pdf/2501.12779">pdf</a>, <a href="https://arxiv.org/format/2501.12779">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> </div> <p class="title is-5 mathjax"> Observation of the $螞_b^0 \to J/蠄螢^- K^+$ and $螢_b^0 \to J/蠄螢^- 蟺^+$ decays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=LHCb+collaboration"> LHCb collaboration</a>, <a href="/search/?searchtype=author&query=Aaij%2C+R">R. 
Aaij</a>, <a href="/search/?searchtype=author&query=Abdelmotteleb%2C+A+S+W">A. S. W. Abdelmotteleb</a>, <a href="/search/?searchtype=author&query=Beteta%2C+C+A">C. Abellan Beteta</a>, <a href="/search/?searchtype=author&query=Abudin%C3%A9n%2C+F">F. Abudin茅n</a>, <a href="/search/?searchtype=author&query=Ackernley%2C+T">T. Ackernley</a>, <a href="/search/?searchtype=author&query=Adefisoye%2C+A+A">A. A. Adefisoye</a>, <a href="/search/?searchtype=author&query=Adeva%2C+B">B. Adeva</a>, <a href="/search/?searchtype=author&query=Adinolfi%2C+M">M. Adinolfi</a>, <a href="/search/?searchtype=author&query=Adlarson%2C+P">P. Adlarson</a>, <a href="/search/?searchtype=author&query=Agapopoulou%2C+C">C. Agapopoulou</a>, <a href="/search/?searchtype=author&query=Aidala%2C+C+A">C. A. Aidala</a>, <a href="/search/?searchtype=author&query=Ajaltouni%2C+Z">Z. Ajaltouni</a>, <a href="/search/?searchtype=author&query=Akar%2C+S">S. Akar</a>, <a href="/search/?searchtype=author&query=Akiba%2C+K">K. Akiba</a>, <a href="/search/?searchtype=author&query=Albicocco%2C+P">P. Albicocco</a>, <a href="/search/?searchtype=author&query=Albrecht%2C+J">J. Albrecht</a>, <a href="/search/?searchtype=author&query=Alessio%2C+F">F. Alessio</a>, <a href="/search/?searchtype=author&query=Alexander%2C+M">M. Alexander</a>, <a href="/search/?searchtype=author&query=Aliouche%2C+Z">Z. Aliouche</a>, <a href="/search/?searchtype=author&query=Cartelle%2C+P+A">P. Alvarez Cartelle</a>, <a href="/search/?searchtype=author&query=Amalric%2C+R">R. Amalric</a>, <a href="/search/?searchtype=author&query=Amato%2C+S">S. Amato</a>, <a href="/search/?searchtype=author&query=Amey%2C+J+L">J. L. Amey</a>, <a href="/search/?searchtype=author&query=Amhis%2C+Y">Y. Amhis</a> , et al. 
(1126 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.12779v1-abstract-short" style="display: inline;"> The first observation of the $Ξ_b^0 \to J/ψΞ^- π^+$ decay and the most precise measurement of the branching fraction of the $Λ_b^0 \to J/ψΞ^- K^+$ decay are reported, using proton-proton collision data from the LHCb experiment collected in 2016--2018 at a centre-of-mass energy of 13~TeV, corresponding to an integrated luminosity of 5.4~fb$^{-1}$. Using the $Λ_b^0 \to J/ψΛ$ and $Ξ_b^0 \to J/ψΞ^-$ d… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.12779v1-abstract-full').style.display = 'inline'; document.getElementById('2501.12779v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.12779v1-abstract-full" style="display: none;"> The first observation of the $Ξ_b^0 \to J/ψΞ^- π^+$ decay and the most precise measurement of the branching fraction of the $Λ_b^0 \to J/ψΞ^- K^+$ decay are reported, using proton-proton collision data from the LHCb experiment collected in 2016--2018 at a centre-of-mass energy of 13~TeV, corresponding to an integrated luminosity of 5.4~fb$^{-1}$. Using the $Λ_b^0 \to J/ψΛ$ and $Ξ_b^0 \to J/ψΞ^-$ decays as normalisation channels, the ratios of branching fractions are measured to be: \[ \frac{\mathcal{B}(Λ_b^0 \to J/ψΞ^- K^+)}{\mathcal{B}(Λ_b^0 \to J/ψΛ)} = (1.17 \pm 0.14 \pm 0.08)\times 10^{-2} \, , \] \[ \frac{\mathcal{B}(Ξ_b^0 \to J/ψΞ^- π^+)}{\mathcal{B}(Ξ_b^0 \to J/ψΞ^-)} = (11.9 \pm 1.4 \pm 0.6)\times 10^{-2}\, , \] where the first uncertainty is statistical and the second systematic. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.12779v1-abstract-full').style.display = 'none'; document.getElementById('2501.12779v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">All figures and tables, along with machine-readable versions and any supplementary material and additional information, are available at https://lbfence.cern.ch/alcm/public/analysis/full-details/3479/ (LHCb public pages)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> CERN-EP-2024-337 LHCb-PAPER-2024-049 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.12650">arXiv:2501.12650</a> <span> [<a href="https://arxiv.org/pdf/2501.12650">pdf</a>, <a href="https://arxiv.org/format/2501.12650">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Earth and Planetary Astrophysics">astro-ph.EP</span> </div> </div> <p class="title is-5 mathjax"> Resonance Capture and Stability Analysis for Planet Pairs under Type I Disk Migration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Lin%2C+L">Linghong Lin</a>, <a href="/search/?searchtype=author&query=Liu%2C+B">Beibei Liu</a>, <a href="/search/?searchtype=author&query=Zheng%2C+Z">Zekai Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="2501.12650v1-abstract-short" style="display: inline;"> We present a theoretical model to investigate a two-planet pair that undergoes convergent type I migration in a gaseous protoplanetary disk and traps into the first-order mean motion resonance. Our study identifies the conditions for resonant capture and explores the subsequent dynamical stability of the system. We derive the analytical criteria for planets with an arbitrary mass ratio and validat… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.12650v1-abstract-full').style.display = 'inline'; document.getElementById('2501.12650v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.12650v1-abstract-full" style="display: none;"> We present a theoretical model to investigate a two-planet pair that undergoes convergent type I migration in a gaseous protoplanetary disk and traps into the first-order mean motion resonance. Our study identifies the conditions for resonant capture and explores the subsequent dynamical stability of the system. We derive the analytical criteria for planets with an arbitrary mass ratio and validate through numerical N-body simulations. Slow migration and weak eccentricity damping are required for resonant capture, the latter of which has not received enough attention in the literature. Once capture into resonance, their following stability can be classified into three regimes: stable trap, overstable trap and escape. Notably, resonant capture remains stable when the inner planet significantly outweighs the outer one. However, the subsequent evolution can be diverse when the mass of the inner planet is lower or comparable to that of the outer planet. Stability weakens as the relative strength between migration and eccentricity damping increases. 
The key results can be comprehensively demonstrated in a $τ_{a}$-$τ_{a}/τ_{\rm e}$ plot, where $τ_{a}$ and $τ_{\rm e}$ are orbital decay and eccentricity damping timescales, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.12650v1-abstract-full').style.display = 'none'; document.getElementById('2501.12650v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 11 figures, submitted to A&amp;A</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=150" class="pagination-link " aria-label="Page 4">4 </a> </li> <li> <a href="/search/?searchtype=author&query=Zheng%2C+Z&start=200" class="pagination-link " aria-label="Page 5">5 </a> </li> <li><span class="pagination-ellipsis">…</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback 
for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a 
href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 
0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>