
Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 691 results for author: <span class="mathjax">Zhao, C</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Zhao%2C+C">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Zhao, C"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zhao%2C+C&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zhao, C"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Zhao%2C+C&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Zhao%2C+C&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhao%2C+C&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhao%2C+C&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhao%2C+C&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zhao%2C+C&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13952">arXiv:2411.13952</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13952">pdf</a>, <a href="https://arxiv.org/format/2411.13952">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Learning thin deformable object manipulation with a multi-sensory integrated soft hand </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chao Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+C">Chunli Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+L">Lifan Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+S">Shuai Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Qifeng Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+H">Hongyu Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13952v1-abstract-short" style="display: inline;"> Robotic manipulation has made significant advancements, with systems demonstrating high precision and repeatability. However, this remarkable precision often fails to translate into efficient manipulation of thin deformable objects. 
Abstract: Robotic manipulation has made significant advancements, with systems demonstrating high precision and repeatability. However, this remarkable precision often fails to translate into efficient manipulation of thin deformable objects. Current robotic systems lack imprecise dexterity, the ability to perform dexterous manipulation through robust and adaptive behaviors that do not rely on precise control. This paper explores the singulation and grasping of thin, deformable objects. Here, we propose a novel solution that incorporates passive compliance, touch, and proprioception into thin, deformable object manipulation. Our system employs a soft, underactuated hand that provides passive compliance, facilitating adaptive and gentle interactions to dexterously manipulate deformable objects without requiring precise control. The tactile and force/torque sensors equipped on the hand, along with a depth camera, gather sensory data required for manipulation via the proposed slip module. The manipulation policies are learned directly from raw sensory data via model-free reinforcement learning, bypassing explicit environmental and object modeling. We implement a hierarchical double-loop learning process to enhance learning efficiency by decoupling the action space. Our method was deployed on real-world robots and trained in a self-supervised manner. The resulting policy was tested on a variety of challenging tasks that were beyond the capabilities of prior studies, ranging from displaying suit fabric like a salesperson to turning pages of sheet music for violinists.
Submitted 21 November, 2024; originally announced November 2024.
Comments: 19 pages
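
The "hierarchical double-loop learning process ... by decoupling the action space" mentioned in the abstract can be pictured with a toy sketch: an outer loop chooses a discrete high-level action while an inner loop locally tunes a continuous low-level parameter for that choice. Everything below (the grasp/force naming, the stub reward, and the update rules) is an illustrative assumption, not the authors' algorithm or code.

```python
# Hypothetical sketch of a hierarchical double-loop learner with a decoupled
# action space (discrete high-level grasp choice vs. continuous force tuning).
import random

random.seed(0)
N_GRASPS = 4                     # discrete high-level actions
q = [0.0] * N_GRASPS             # value estimate per high-level action
force = [0.5] * N_GRASPS         # low-level parameter tuned per grasp

def rollout(grasp, f):
    """Stub environment: each grasp has a base quality and an ideal force."""
    base = (0.6, 0.8, 1.0, 0.7)[grasp]
    ideal = 0.2 + 0.2 * grasp
    return base - abs(f - ideal) + random.gauss(0, 0.05)

for episode in range(2000):
    # Outer loop: epsilon-greedy over the discrete grasp choices.
    if random.random() < 0.1:
        g = random.randrange(N_GRASPS)
    else:
        g = max(range(N_GRASPS), key=lambda i: q[i])

    # Inner loop: local search over the continuous force for that grasp.
    best_f, best_r = force[g], rollout(g, force[g])
    for _ in range(3):
        cand = min(1.0, max(0.0, best_f + random.gauss(0, 0.05)))
        r = rollout(g, cand)
        if r > best_r:
            best_f, best_r = cand, r
    force[g] = best_f

    # Outer value update uses the reward achieved by the tuned inner action.
    q[g] += 0.1 * (best_r - q[g])

print("preferred grasp:", max(range(N_GRASPS), key=lambda i: q[i]))
```

Decoupling keeps each loop's search space small, which is the learning-efficiency argument the abstract makes.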
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13602">arXiv:2411.13602</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13602">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Large-scale cross-modality pretrained model enhances cardiovascular state estimation and cardiomyopathy detection from electrocardiograms: An AI system development and multi-center validation study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ding%2C+Z">Zhengyao Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Y">Yujian Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Youyao Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chengchen Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Ziyu Li</a>, <a href="/search/cs?searchtype=author&amp;query=Mao%2C+Y">Yiheng Mao</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H">Haitao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Q">Qian Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jing Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yue Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+M">Mengjia Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Longbo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chu%2C+X">Xuesen Chu</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+W">Weichao Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Ziyi Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+F">Fei Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hongkun Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+T">Ting Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Z">Zhengxing Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13602v1-abstract-short" style="display: inline;"> Cardiovascular diseases (CVDs) present significant challenges for early and accurate diagnosis. While cardiac magnetic resonance imaging (CMR) is the gold standard for assessing cardiac function and diagnosing CVDs, its high cost and technical complexity limit accessibility. In contrast, electrocardiography (ECG) offers promise for large-scale early screening. 
Abstract: Cardiovascular diseases (CVDs) present significant challenges for early and accurate diagnosis. While cardiac magnetic resonance imaging (CMR) is the gold standard for assessing cardiac function and diagnosing CVDs, its high cost and technical complexity limit accessibility. In contrast, electrocardiography (ECG) offers promise for large-scale early screening. This study introduces CardiacNets, an innovative model that enhances ECG analysis by leveraging the diagnostic strengths of CMR through cross-modal contrastive learning and generative pretraining. CardiacNets serves two primary functions: (1) it evaluates detailed cardiac function indicators and screens for potential CVDs, including coronary artery disease, cardiomyopathy, pericarditis, heart failure and pulmonary hypertension, using ECG input; and (2) it enhances interpretability by generating high-quality CMR images from ECG data. We train and validate the proposed CardiacNets on two large-scale public datasets (the UK Biobank with 41,519 individuals and the MIMIC-IV-ECG comprising 501,172 samples) as well as three private datasets (FAHZU with 410 individuals, SAHZU with 464 individuals, and QPH with 338 individuals), and the findings demonstrate that CardiacNets consistently outperforms traditional ECG-only models, substantially improving screening accuracy. Furthermore, the generated CMR images provide valuable diagnostic support for physicians of all experience levels. This proof-of-concept study highlights how ECG can facilitate cross-modal insights into cardiac function assessment, paving the way for enhanced CVD screening and diagnosis at a population level.
Submitted 19 November, 2024; originally announced November 2024.
Comments: 23 pages, 8 figures
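
The cross-modal contrastive pretraining the abstract describes (pairing each subject's ECG with its CMR) is commonly implemented as a symmetric InfoNCE objective. A minimal NumPy sketch of that loss follows; the encoders are replaced by random features, and nothing here is taken from the CardiacNets implementation.

```python
# Illustrative CLIP-style contrastive loss between ECG and CMR embeddings.
import numpy as np

rng = np.random.default_rng(0)
B, D = 8, 32                              # batch of paired ECG/CMR samples
ecg_emb = rng.normal(size=(B, D))         # would come from an ECG encoder
cmr_emb = rng.normal(size=(B, D))         # would come from a CMR encoder

def l2_normalize(x):
    return x / np.linalg.norm(x, axis=1, keepdims=True)

def clip_style_loss(a, b, temperature=0.07):
    """Symmetric InfoNCE: matching (ECG_i, CMR_i) pairs are positives."""
    a, b = l2_normalize(a), l2_normalize(b)
    logits = a @ b.T / temperature                 # (B, B) similarity matrix
    log_softmax_rows = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    log_softmax_cols = logits - np.log(np.exp(logits).sum(axis=0, keepdims=True))
    diag = np.arange(len(a))
    loss_ecg_to_cmr = -log_softmax_rows[diag, diag].mean()
    loss_cmr_to_ecg = -log_softmax_cols[diag, diag].mean()
    return 0.5 * (loss_ecg_to_cmr + loss_cmr_to_ecg)

print("contrastive loss on random features:", clip_style_loss(ecg_emb, cmr_emb))
```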
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12915">arXiv:2411.12915</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12915">pdf</a>, <a href="https://arxiv.org/format/2411.12915">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> VILA-M3: Enhancing Vision-Language Models with Medical Expert Knowledge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nath%2C+V">Vishwesh Nath</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+W">Wenqi Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+D">Dong Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Myronenko%2C+A">Andriy Myronenko</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+M">Mingxin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+Y">Yao Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhijian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+H">Hongxu Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Law%2C+Y+M">Yee Man Law</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yucheng Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+P">Pengfei Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Can Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Z">Ziyue Xu</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+Y">Yufan He</a>, <a href="/search/cs?searchtype=author&amp;query=Heinrich%2C+G">Greg Heinrich</a>, <a href="/search/cs?searchtype=author&amp;query=Aylward%2C+S">Stephen Aylward</a>, <a href="/search/cs?searchtype=author&amp;query=Edgar%2C+M">Marc Edgar</a>, <a href="/search/cs?searchtype=author&amp;query=Zephyr%2C+M">Michael Zephyr</a>, <a href="/search/cs?searchtype=author&amp;query=Molchanov%2C+P">Pavlo Molchanov</a>, <a href="/search/cs?searchtype=author&amp;query=Turkbey%2C+B">Baris Turkbey</a>, <a href="/search/cs?searchtype=author&amp;query=Roth%2C+H">Holger Roth</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+D">Daguang Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12915v1-abstract-short" style="display: inline;"> Generalist vision language models (VLMs) have made significant strides in computer vision, but they fall short in specialized fields like healthcare, where expert knowledge is essential. 
Abstract: Generalist vision language models (VLMs) have made significant strides in computer vision, but they fall short in specialized fields like healthcare, where expert knowledge is essential. In traditional computer vision tasks, creative or approximate answers may be acceptable, but in healthcare, precision is paramount. Current large multimodal models like Gemini and GPT-4o are insufficient for medical tasks due to their reliance on memorized internet knowledge rather than the nuanced expertise required in healthcare. VLMs are usually trained in three stages: vision pre-training, vision-language pre-training, and instruction fine-tuning (IFT). IFT has typically been applied using a mixture of generic and healthcare data. In contrast, we propose that for medical VLMs, a fourth stage of specialized IFT is necessary, which focuses on medical data and includes information from domain expert models. Domain expert models developed for medical use are crucial because they are specifically trained for certain clinical tasks, e.g., to detect tumors and classify abnormalities through segmentation and classification, and they learn fine-grained features of medical data, features that are often too intricate for a VLM to capture effectively, especially in radiology. This paper introduces a new framework, VILA-M3, for medical VLMs that utilizes domain knowledge via expert models. Through our experiments, we show an improved state-of-the-art (SOTA) performance with an average improvement of ~9% over the prior SOTA model Med-Gemini and ~6% over models trained on the specific tasks. Our approach emphasizes the importance of domain expertise in creating precise, reliable VLMs for medical applications.
Submitted 19 November, 2024; originally announced November 2024.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.12913">arXiv:2411.12913</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.12913">pdf</a>, <a href="https://arxiv.org/format/2411.12913">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MLDGG: Meta-Learning for Domain Generalization on Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tian%2C+Q">Qin Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chen Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Shao%2C+M">Minglai Shao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Wenjun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Yujie Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+D">Dong Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.12913v1-abstract-short" style="display: inline;"> Domain generalization on graphs aims to develop models with robust generalization capabilities, ensuring effective performance on the testing set despite disparities between testing and training distributions. However, existing methods often rely on static encoders directly applied to the target domain, constraining its flexible adaptability. In contrast to conventional methodologies, which concen&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.12913v1-abstract-full').style.display = 'inline'; document.getElementById('2411.12913v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.12913v1-abstract-full" style="display: none;"> Domain generalization on graphs aims to develop models with robust generalization capabilities, ensuring effective performance on the testing set despite disparities between testing and training distributions. However, existing methods often rely on static encoders directly applied to the target domain, constraining its flexible adaptability. In contrast to conventional methodologies, which concentrate on developing specific generalized models, our framework, MLDGG, endeavors to achieve adaptable generalization across diverse domains by integrating cross-multi-domain meta-learning with structure learning and semantic identification. Initially, it introduces a generalized structure learner to mitigate the adverse effects of task-unrelated edges, enhancing the comprehensiveness of representations learned by Graph Neural Networks (GNNs) while capturing shared structural information across domains. Subsequently, a representation learner is designed to disentangle domain-invariant semantic and domain-specific variation information in node embedding by leveraging causal reasoning for semantic identification, further enhancing generalization. 
In the context of meta-learning, meta-parameters for both learners are optimized to facilitate knowledge transfer and enable effective adaptation to graphs through fine-tuning within the target domains, where target graphs are inaccessible during training. Our empirical results demonstrate that MLDGG surpasses baseline methods, showcasing its effectiveness in three different distribution shift settings.
Submitted 19 November, 2024; originally announced November 2024.
Comments: Accepted in KDD 2025 (research track)
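
The outer/inner optimization the abstract refers to (meta-parameters learned across source domains, then fine-tuned on an unseen target domain) can be sketched with a first-order, Reptile-style loop on a toy linear model. The synthetic domains, the model, and the step sizes below are assumptions for illustration only, not MLDGG's structure and representation learners.

```python
# Minimal first-order meta-learning loop: learn meta-parameters on source
# domains, then adapt by fine-tuning on a held-out target domain.
import numpy as np

rng = np.random.default_rng(1)
D = 5
true_w = {dom: rng.normal(size=D) + 2.0 for dom in range(4)}   # related domains

def sample_task(dom, n=64):
    X = rng.normal(size=(n, D))
    y = X @ true_w[dom] + 0.1 * rng.normal(size=n)
    return X, y

def inner_finetune(w, X, y, lr=0.05, steps=10):
    for _ in range(steps):
        grad = 2 * X.T @ (X @ w - y) / len(y)                  # MSE gradient
        w = w - lr * grad
    return w

meta_w = np.zeros(D)                                           # meta-parameters
for it in range(200):                                          # outer loop
    dom = rng.integers(0, 3)                                   # source domains only
    X, y = sample_task(dom)
    adapted = inner_finetune(meta_w.copy(), X, y)              # inner loop
    meta_w += 0.1 * (adapted - meta_w)                         # Reptile-style update

# Adaptation to a "target" domain never seen in the outer loop.
Xt, yt = sample_task(3)
w_target = inner_finetune(meta_w.copy(), Xt, yt)
print("target-domain MSE after fine-tuning:", np.mean((Xt @ w_target - yt) ** 2))
```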

5. arXiv:2411.12309 (https://arxiv.org/abs/2411.12309) [pdf, other]
Subjects: cs.CV (Computer Vision and Pattern Recognition)
Title: DGTR: Distributed Gaussian Turbo-Reconstruction for Sparse-View Vast Scenes
Authors: Hao Li, Yuanyuan Gao, Haosong Peng, Chenming Wu, Weicai Ye, Yufeng Zhan, Chen Zhao, Dingwen Zhang, Jingdong Wang, Junwei Han
Abstract: Novel-view synthesis (NVS) approaches play a critical role in vast scene reconstruction. However, these methods rely heavily on dense image inputs and prolonged training times, making them unsuitable where computational resources are limited. Additionally, few-shot methods often struggle with poor reconstruction quality in vast environments. This paper presents DGTR, a novel distributed framework for efficient Gaussian reconstruction for sparse-view vast scenes. Our approach divides the scene into regions, processed independently by drones with sparse image inputs. Using a feed-forward Gaussian model, we predict high-quality Gaussian primitives, followed by a global alignment algorithm to ensure geometric consistency. Synthetic views and depth priors are incorporated to further enhance training, while a distillation-based model aggregation mechanism enables efficient reconstruction. Our method achieves high-quality large-scale scene reconstruction and novel-view synthesis in significantly reduced training times, outperforming existing approaches in both speed and scalability. We demonstrate the effectiveness of our framework on vast aerial scenes, achieving high-quality results within minutes. Code will be released at https://3d-aigc.github.io/DGTR.
Submitted 20 November, 2024; v1 submitted 19 November, 2024; originally announced November 2024.
Comments: Code will be released at https://3d-aigc.github.io/DGTR

6. arXiv:2411.11647 (https://arxiv.org/abs/2411.11647) [pdf, ps, other]
Subjects: cs.LG (Machine Learning); cs.AI (Artificial Intelligence); cs.CR (Cryptography and Security)
Title: No-regret Exploration in Shuffle Private Reinforcement Learning
Authors: Shaojie Bai, Mohammad Sadegh Talebi, Chengcheng Zhao, Peng Cheng, Jiming Chen
Abstract: Differential privacy (DP) has recently been introduced into episodic reinforcement learning (RL) to formally address user privacy concerns in personalized services. Previous work mainly focuses on two trust models of DP: the central model, where a central agent is responsible for protecting users' sensitive data, and the (stronger) local model, where the protection occurs directly on the user side. However, they either require a trusted central agent or incur a significantly higher privacy cost, making them unsuitable for many scenarios. This work introduces a trust model stronger than the central model but with a lower privacy cost than the local model, leveraging the emerging shuffle model of privacy. We present the first generic algorithm for episodic RL under the shuffle model, where a trusted shuffler randomly permutes a batch of users' data before sending it to the central agent. We then instantiate the algorithm using our proposed shuffle Privatizer, relying on a shuffle private binary summation mechanism. Our analysis shows that the algorithm achieves a near-optimal regret bound comparable to that of the centralized model and significantly outperforms the local model in terms of privacy cost.
Submitted 18 November, 2024; originally announced November 2024.
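
A shuffle-model binary summation of the kind the Privatizer relies on can be sketched in a few lines: each user randomizes a bit locally, the shuffler only permutes the reports, and the analyzer debiases the noisy count. The randomized-response parameters and the debiasing formula below are the textbook ones, not necessarily the paper's exact mechanism.

```python
# Toy shuffle-model binary summation with local randomized response.
import math
import random

def randomized_response(bit, eps):
    """Keep the true bit with probability e^eps / (e^eps + 1), flip otherwise."""
    keep = math.exp(eps) / (math.exp(eps) + 1.0)
    return bit if random.random() < keep else 1 - bit

def shuffle_and_estimate(bits, eps):
    reports = [randomized_response(b, eps) for b in bits]
    random.shuffle(reports)                      # shuffler: permutation only
    p = math.exp(eps) / (math.exp(eps) + 1.0)
    n = len(reports)
    # Unbiased estimate of the true sum from the noisy count.
    return (sum(reports) - n * (1 - p)) / (2 * p - 1)

random.seed(0)
true_bits = [int(random.random() < 0.3) for _ in range(10_000)]
est = shuffle_and_estimate(true_bits, eps=1.0)
print("true sum:", sum(true_bits), " estimated sum:", round(est))
```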
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11262">arXiv:2411.11262</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11262">pdf</a>, <a href="https://arxiv.org/format/2411.11262">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Cross-Patient Pseudo Bags Generation and Curriculum Contrastive Learning for Imbalanced Multiclassification of Whole Slide Image </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yonghuang Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+X">Xuan Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+X">Xinyuan Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chengqian Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+J">Jinhua Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11262v1-abstract-short" style="display: inline;"> Pathology computing has dramatically improved pathologists&#39; workflow and diagnostic decision-making processes. Although computer-aided diagnostic systems have shown considerable value in whole slide image (WSI) analysis, the problem of multi-classification under sample imbalance remains an intractable challenge. To address this, we propose learning fine-grained information by generating sub-bags w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11262v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11262v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11262v1-abstract-full" style="display: none;"> Pathology computing has dramatically improved pathologists&#39; workflow and diagnostic decision-making processes. Although computer-aided diagnostic systems have shown considerable value in whole slide image (WSI) analysis, the problem of multi-classification under sample imbalance remains an intractable challenge. To address this, we propose learning fine-grained information by generating sub-bags with feature distributions similar to the original WSIs. Additionally, we utilize a pseudo-bag generation algorithm to further leverage the abundant and redundant information in WSIs, allowing efficient training in unbalanced-sample multi-classification tasks. Furthermore, we introduce an affinity-based sample selection and curriculum contrastive learning strategy to enhance the stability of model representation learning. Unlike previous approaches, our framework transitions from learning bag-level representations to understanding and exploiting the feature distribution of multi-instance bags. Our method demonstrates significant performance improvements on three datasets, including tumor classification and lymph node metastasis. On average, it achieves a 4.39-point improvement in F1 score compared to the second-best method across the three tasks, underscoring its superior performance. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11262v1-abstract-full').style.display = 'none'; document.getElementById('2411.11262v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07392">arXiv:2411.07392</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07392">pdf</a>, <a href="https://arxiv.org/format/2411.07392">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Feature-Space Semantic Invariance: Enhanced OOD Detection for Open-Set Domain Generalization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoliang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chen Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+F">Feng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07392v1-abstract-short" style="display: inline;"> Open-set domain generalization addresses a real-world challenge: training a model to generalize across unseen domains (domain generalization) while also detecting samples from unknown classes not encountered during training (open-set recognition). However, most existing approaches tackle these issues separately, limiting their practical applicability. To overcome this limitation, we propose a unif&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07392v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07392v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07392v1-abstract-full" style="display: none;"> Open-set domain generalization addresses a real-world challenge: training a model to generalize across unseen domains (domain generalization) while also detecting samples from unknown classes not encountered during training (open-set recognition). However, most existing approaches tackle these issues separately, limiting their practical applicability. To overcome this limitation, we propose a unified framework for open-set domain generalization by introducing Feature-space Semantic Invariance (FSI). FSI maintains semantic consistency across different domains within the feature space, enabling more accurate detection of OOD instances in unseen domains. Additionally, we adopt a generative model to produce synthetic data with novel domain styles or class labels, enhancing model robustness. 
Initial experiments show that our method improves AUROC by 9.1% to 18.9% on ColoredMNIST, while also significantly increasing in-distribution classification accuracy.
Submitted 11 November, 2024; originally announced November 2024.
Comments: IEEE BigData 2024, Ph.D. Forum

9. arXiv:2411.05764 (https://arxiv.org/abs/2411.05764) [pdf, other]
Subjects: cs.CL (Computation and Language); cs.LG (Machine Learning)
Title: FinDVer: Explainable Claim Verification over Long and Hybrid-Content Financial Documents
Authors: Yilun Zhao, Yitao Long, Yuru Jiang, Chengye Wang, Weiyuan Chen, Hongjun Liu, Yiming Zhang, Xiangru Tang, Chen Zhao, Arman Cohan
Abstract: We introduce FinDVer, a comprehensive benchmark specifically designed to evaluate the explainable claim verification capabilities of LLMs in the context of understanding and analyzing long, hybrid-content financial documents.
FinDVer contains 2,400 expert-annotated examples, divided into three subsets: information extraction, numerical reasoning, and knowledge-intensive reasoning, each addressing common scenarios encountered in real-world financial contexts. We assess a broad spectrum of LLMs under long-context and RAG settings. Our results show that even the current best-performing system, GPT-4o, still lags behind human experts. We further provide in-depth analysis of the long-context and RAG settings, Chain-of-Thought reasoning, and model reasoning errors, offering insights to drive future advancements. We believe that FinDVer can serve as a valuable benchmark for evaluating LLMs in claim verification over complex, expert-domain documents.
Submitted 8 November, 2024; originally announced November 2024.
Comments: EMNLP 2024

10. arXiv:2411.02671 (https://arxiv.org/abs/2411.02671) [pdf, other]
Subjects: cs.LG (Machine Learning); cs.AI (Artificial Intelligence); cs.CL (Computation and Language)
Title: Fair In-Context Learning via Latent Concept Variables
Authors: Karuna Bhaila, Minh-Hao Van, Kennedy Edemacu, Chen Zhao, Feng Chen, Xintao Wu
Abstract: The emerging in-context learning (ICL) ability of large language models (LLMs) has prompted their use for predictive tasks in various domains with different types of data facilitated by serialization methods. However, with increasing applications in high-stakes domains, it has been shown that LLMs can inherit social bias and discrimination from their pre-training data. In this work, we investigate this inherent bias in LLMs during in-context learning with tabular data. We focus on an optimal demonstration selection approach that utilizes latent concept variables for resource-efficient task adaptation. We design data augmentation strategies that reduce correlation between predictive outcomes and sensitive variables, helping to promote fairness during latent concept learning. We utilize the learned concept and select demonstrations from a training dataset to obtain fair predictions during inference while maintaining model utility. The latent concept variable is learned using a smaller internal LLM and the selected demonstrations can be used for inference with larger external LLMs. We empirically verify that the fair latent variable approach improves fairness results on tabular datasets compared to multiple heuristic demonstration selection methods.
Submitted 4 November, 2024; originally announced November 2024.
Comments: 12 pages
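
The selection step the abstract describes (a latent concept learned with a smaller internal LLM, then demonstrations chosen for a larger external LLM) might be approximated as scoring candidate demonstrations against a concept vector while balancing the sensitive attribute. The embeddings, concept vector, and balancing rule below are stand-ins, not the paper's procedure.

```python
# Illustrative concept-aligned, group-balanced demonstration selection.
import numpy as np

rng = np.random.default_rng(0)
n, d = 200, 16
demo_embeddings = rng.normal(size=(n, d))         # from a small internal model (stub)
sensitive = rng.integers(0, 2, size=n)            # e.g., a binary group label
latent_concept = rng.normal(size=d)               # learned concept vector (stub)

def select_demonstrations(k=8):
    scores = demo_embeddings @ latent_concept     # concept-alignment score
    chosen = []
    for group in (0, 1):                          # equal share per group
        idx = np.where(sensitive == group)[0]
        top = idx[np.argsort(scores[idx])[::-1][: k // 2]]
        chosen.extend(top.tolist())
    return chosen

demos = select_demonstrations(k=8)
print("selected demo indices:", demos)
print("group labels of selection:", [int(sensitive[i]) for i in demos])
```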
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02444">arXiv:2411.02444</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02444">pdf</a>, <a href="https://arxiv.org/format/2411.02444">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> MADOD: Generalizing OOD Detection to Unseen Domains via G-Invariance Meta-Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoliang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chen Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+F">Feng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02444v1-abstract-short" style="display: inline;"> Real-world machine learning applications often face simultaneous covariate and semantic shifts, challenging traditional domain generalization and out-of-distribution (OOD) detection methods. We introduce Meta-learned Across Domain Out-of-distribution Detection (MADOD), a novel framework designed to address both shifts concurrently. MADOD leverages meta-learning and G-invariance to enhance model ge&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02444v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02444v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02444v1-abstract-full" style="display: none;"> Real-world machine learning applications often face simultaneous covariate and semantic shifts, challenging traditional domain generalization and out-of-distribution (OOD) detection methods. We introduce Meta-learned Across Domain Out-of-distribution Detection (MADOD), a novel framework designed to address both shifts concurrently. MADOD leverages meta-learning and G-invariance to enhance model generalizability and OOD detection in unseen domains. Our key innovation lies in task construction: we randomly designate in-distribution classes as pseudo-OODs within each meta-learning task, simulating OOD scenarios using existing data. This approach, combined with energy-based regularization, enables the learning of robust, domain-invariant features while calibrating decision boundaries for effective OOD detection. Operating in a test domain-agnostic setting, MADOD eliminates the need for adaptation during inference, making it suitable for scenarios where test data is unavailable. Extensive experiments on real-world and synthetic datasets demonstrate MADOD&#39;s superior performance in semantic OOD detection across unseen domains, achieving an AUPR improvement of 8.48% to 20.81%, while maintaining competitive in-distribution classification accuracy, representing a significant advancement in handling both covariate and semantic shifts. 
arXiv:2411.02265 [pdf, other] cs.CL cs.AI
Hunyuan-Large: An Open-Source MoE Model with 52 Billion Activated Parameters by Tencent
Authors: Xingwu Sun, Yanfeng Chen, Yiqing Huang, Ruobing Xie, Jiaqi Zhu, Kai Zhang, Shuaipeng Li, Zhen Yang, Jonny Han, Xiaobo Shu, Jiahao Bu, Zhongzhi Chen, Xuemeng Huang, Fengzong Lian, Saiyong Yang, Jianfeng Yan, Yuyuan Zeng, Xiaoqin Ren, Chao Yu, Lulu Wu, Yue Mao, Jun Xia, Tao Yang, Suncong Zheng, Kan Wu, et al. (83 additional authors not shown)
Abstract: In this paper, we introduce Hunyuan-Large, which is currently the largest open-source Transformer-based mixture of experts model, with a total of 389 billion parameters and 52 billion activated parameters, capable of handling up to 256K tokens. We conduct a thorough evaluation of Hunyuan-Large's superior performance across various benchmarks including language understanding and generation, logical reasoning, mathematical problem-solving, coding, long-context, and aggregated tasks, where it outperforms LLama3.1-70B and exhibits comparable performance when compared to the significantly larger LLama3.1-405B model. Key practices of Hunyuan-Large include large-scale synthetic data that is orders of magnitude larger than in previous literature, a mixed expert routing strategy, a key-value cache compression technique, and an expert-specific learning rate strategy. Additionally, we investigate the scaling laws and learning rate schedule of mixture of experts models, providing valuable insights and guidance for future model development and optimization. The code and checkpoints of Hunyuan-Large are released to facilitate future innovations and applications. Codes: https://github.com/Tencent/Hunyuan-Large Models: https://huggingface.co/tencent/Tencent-Hunyuan-Large
Submitted 6 November, 2024; v1 submitted 4 November, 2024; originally announced November 2024.
Comments: 17 pages, 4 Figures

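To make the distinction between total and activated parameters concrete, here is a generic top-k mixture-of-experts block in PyTorch. It is not Tencent's implementation; the expert count, hidden sizes, and k are illustrative.

```python
# Generic top-k MoE feed-forward block: every expert adds to the total parameter count,
# but each token only runs through the k experts its router selects.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TopKMoE(nn.Module):
    def __init__(self, d_model: int = 512, d_ff: int = 2048, n_experts: int = 16, k: int = 2):
        super().__init__()
        self.k = k
        self.router = nn.Linear(d_model, n_experts)
        self.experts = nn.ModuleList(
            nn.Sequential(nn.Linear(d_model, d_ff), nn.GELU(), nn.Linear(d_ff, d_model))
            for _ in range(n_experts))

    def forward(self, x: torch.Tensor) -> torch.Tensor:   # x: (n_tokens, d_model)
        gates = F.softmax(self.router(x), dim=-1)
        top_g, top_i = gates.topk(self.k, dim=-1)
        top_g = top_g / top_g.sum(dim=-1, keepdim=True)    # renormalize the k gates
        out = torch.zeros_like(x)
        for slot in range(self.k):
            for e, expert in enumerate(self.experts):
                mask = top_i[:, slot] == e
                if mask.any():
                    out[mask] += top_g[mask, slot, None] * expert(x[mask])
        return out
```

With 16 experts and k = 2, every token touches only 2 of the 16 expert MLPs, which is the sense in which a model can have 389B total but only 52B activated parameters.
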
arXiv:2411.01353 [pdf, other] cs.LG
Can Large Language Model Predict Employee Attrition?
Authors: Xiaoye Ma, Weiheng Liu, Changyi Zhao, Liliya R. Tukhvatulina
Abstract: Employee attrition poses significant costs for organizations, with traditional statistical prediction methods often struggling to capture modern workforce complexities. Machine learning (ML) advancements offer more scalable and accurate solutions, but large language models (LLMs) introduce new potential in human resource management by interpreting nuanced employee communication and detecting subtle turnover cues. This study leverages the IBM HR Analytics Attrition dataset to compare the predictive accuracy and interpretability of a fine-tuned GPT-3.5 model against traditional ML classifiers, including Logistic Regression, k-Nearest Neighbors (KNN), Support Vector Machine (SVM), Decision Tree, Random Forest, AdaBoost, and XGBoost. While traditional models are easier to use and interpret, LLMs can reveal deeper patterns in employee behavior. Our findings show that the fine-tuned GPT-3.5 model outperforms traditional methods with a precision of 0.91, recall of 0.94, and an F1-score of 0.92, while the best traditional model, SVM, achieved an F1-score of 0.82, with Random Forest and XGBoost reaching 0.80. These results highlight GPT-3.5's ability to capture complex patterns in attrition risk, offering organizations improved insights for retention strategies and underscoring the value of LLMs in HR applications.
Submitted 2 November, 2024; originally announced November 2024.

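For the classical-ML side of the comparison, a minimal baseline on the public IBM HR table might look like the sketch below, with Random Forest shown as one of the listed classifiers. The file name is the commonly distributed Kaggle copy, and the preprocessing is an assumption, not the paper's exact setup.

```python
# Minimal classical-ML baseline sketch (not the paper's pipeline), reporting
# precision/recall/F1 as in the comparison above.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

df = pd.read_csv("WA_Fn-UseC_-HR-Employee-Attrition.csv")    # commonly distributed copy
y = (df.pop("Attrition") == "Yes").astype(int)               # 1 = employee left
cat_cols = df.select_dtypes(include="object").columns.tolist()

model = Pipeline([
    ("prep", ColumnTransformer(
        [("cat", OneHotEncoder(handle_unknown="ignore"), cat_cols)],
        remainder="passthrough")),
    ("clf", RandomForestClassifier(n_estimators=300, random_state=0)),
])
X_tr, X_te, y_tr, y_te = train_test_split(df, y, test_size=0.2, stratify=y, random_state=0)
model.fit(X_tr, y_tr)
print(classification_report(y_te, model.predict(X_te), digits=2))
```
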
arXiv:2411.01316 [pdf, other] cs.LG cs.AI
FEED: Fairness-Enhanced Meta-Learning for Domain Generalization
Authors: Kai Jiang, Chen Zhao, Haoliang Wang, Feng Chen
Abstract: Generalizing to out-of-distribution data while being aware of model fairness is a significant and challenging problem in meta-learning. The goal of this problem is to find a set of fairness-aware invariant parameters of a classifier that is trained using data drawn from a family of related training domains with distribution shift on non-sensitive features as well as different levels of dependence between model predictions and sensitive features, so that the classifier can achieve good generalization performance on unknown but distinct test domains. To tackle this challenge, existing state-of-the-art methods either address the domain generalization problem but completely ignore learning with fairness or solely specify shifted domains with various fairness levels. This paper introduces an approach to fairness-aware meta-learning that significantly enhances domain generalization capabilities. Our framework, Fairness-Enhanced Meta-Learning for Domain Generalization (FEED), disentangles latent data representations into content, style, and sensitive vectors. This disentanglement facilitates the robust generalization of machine learning models across diverse domains while adhering to fairness constraints. Unlike traditional methods that focus primarily on domain invariance or sensitivity to shifts, our model integrates a fairness-aware invariance criterion directly into the meta-learning process. This integration ensures that the learned parameters uphold fairness consistently, even when domain characteristics vary widely. We validate our approach through extensive experiments across multiple benchmarks, demonstrating not only superior performance in maintaining high accuracy and fairness but also significant improvements over existing state-of-the-art methods in domain generalization tasks.
Submitted 2 November, 2024; originally announced November 2024.
Comments: IEEE International Conference on Big Data 2024

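The abstract does not spell out its fairness-aware invariance criterion; one generic surrogate that a FEED-style objective could add to the meta-learning loss is a demographic-parity-gap penalty, sketched below under the assumption of a binary sensitive attribute with both groups present in the batch.

```python
# Generic fairness surrogate: squared demographic-parity gap of predictions in a batch.
import torch

def dp_gap_penalty(probs: torch.Tensor, sensitive: torch.Tensor) -> torch.Tensor:
    # probs: (N,) predicted P(y = 1); sensitive: (N,) group labels in {0, 1}
    gap = probs[sensitive == 0].mean() - probs[sensitive == 1].mean()
    return gap ** 2

def fairness_aware_loss(task_loss: torch.Tensor, probs: torch.Tensor,
                        sensitive: torch.Tensor, lam: float = 1.0) -> torch.Tensor:
    return task_loss + lam * dp_gap_penalty(probs, sensitive)
```
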
arXiv:2411.01159 [pdf, other] cs.LG cs.AI
Supervised Score-Based Modeling by Gradient Boosting
Authors: Changyuan Zhao, Hongyang Du, Guangyuan Liu, Dusit Niyato
Abstract: Score-based generative models can effectively learn the distribution of data by estimating the gradient of the distribution. Due to the multi-step denoising characteristic, researchers have recently considered combining score-based generative models with the gradient boosting algorithm, a multi-step supervised learning algorithm, to solve supervised learning tasks. However, existing generative model algorithms are often limited by the stochastic nature of the models and the long inference time, impacting prediction performance. Therefore, we propose a Supervised Score-based Model (SSM), which can be viewed as a gradient boosting algorithm combining score matching. We provide a theoretical analysis of learning and sampling for SSM to balance inference time and prediction accuracy. Via ablation experiments on selected examples, we demonstrate the outstanding performance of the proposed techniques. Additionally, we compare our model with other probabilistic models, including Natural Gradient Boosting (NGBoost), Classification and Regression Diffusion Models (CARD), Diffusion Boosted Trees (DBT), and Bayesian neural network-based models. The experimental results show that our model outperforms existing models in both accuracy and inference time.
Submitted 2 November, 2024; originally announced November 2024.
Comments: 9 pages, 1 figure, 4 tables

arXiv:2411.00332 [pdf] cond-mat.mes-hall cs.LG
In-situ Self-optimization of Quantum Dot Emission for Lasers by Machine-Learning Assisted Epitaxy
Authors: Chao Shen, Wenkang Zhan, Shujie Pan, Hongyue Hao, Ning Zhuo, Kaiyao Xin, Hui Cong, Chi Xu, Bo Xu, Tien Khee Ng, Siming Chen, Chunlai Xue, Fengqi Liu, Zhanguo Wang, Chao Zhao
Abstract: Traditional methods for optimizing light source emissions rely on a time-consuming trial-and-error approach. While in-situ optimization of light source gain media emission during growth is ideal, it has yet to be realized. In this work, we integrate in-situ reflection high-energy electron diffraction (RHEED) with machine learning (ML) to correlate the surface reconstruction with the photoluminescence (PL) of InAs/GaAs quantum dots (QDs), which serve as the active region of lasers. A lightweight ResNet-GLAM model is employed for the real-time processing of RHEED data as input, enabling effective identification of optical performance. This approach guides the dynamic optimization of growth parameters, allowing real-time feedback control to adjust the QD emission for lasers. We successfully optimized InAs QDs on GaAs substrates, with a 3.2-fold increase in PL intensity and a reduction in full width at half maximum (FWHM) from 36.69 meV to 28.17 meV under initially suboptimal growth conditions. Our automated, in-situ self-optimized lasers with 5-layer InAs QDs achieved electrically pumped continuous-wave operation at 1240 nm with a low threshold current density of 150 A/cm² at room temperature, performance comparable to samples grown through traditional manual multi-parameter optimization methods. These results mark a significant step toward intelligent, low-cost, and reproducible light-emitter production.
Submitted 31 October, 2024; originally announced November 2024.
Comments: 5 figures

arXiv:2411.00144 [pdf, other] cs.CV cs.GR
Self-Ensembling Gaussian Splatting for Few-shot Novel View Synthesis
Authors: Chen Zhao, Xuan Wang, Tong Zhang, Saqib Javed, Mathieu Salzmann
Abstract: 3D Gaussian Splatting (3DGS) has demonstrated remarkable effectiveness for novel view synthesis (NVS). However, the 3DGS model tends to overfit when trained with sparse posed views, limiting its generalization capacity for broader pose variations. In this paper, we alleviate the overfitting problem by introducing a self-ensembling Gaussian Splatting (SE-GS) approach. We present two Gaussian Splatting models named the $\mathbf{\Sigma}$-model and the $\mathbf{\Delta}$-model. The $\mathbf{\Sigma}$-model serves as the primary model that generates novel-view images during inference. At the training stage, the $\mathbf{\Sigma}$-model is guided away from specific local optima by an uncertainty-aware perturbing strategy. We dynamically perturb the $\mathbf{\Delta}$-model based on the uncertainties of novel-view renderings across different training steps, resulting in diverse temporal models sampled from the Gaussian parameter space without additional training costs. The geometry of the $\mathbf{\Sigma}$-model is regularized by penalizing discrepancies between the $\mathbf{\Sigma}$-model and the temporal samples. Therefore, our SE-GS conducts an effective and efficient regularization across a large number of Gaussian Splatting models, resulting in a robust ensemble, the $\mathbf{\Sigma}$-model. Experimental results on the LLFF, Mip-NeRF360, DTU, and MVImgNet datasets show that our approach improves NVS quality with few-shot training views, outperforming existing state-of-the-art methods. The code is released at https://github.com/sailor-z/SE-GS.
Submitted 31 October, 2024; originally announced November 2024.

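At a purely conceptual level, the Σ/Δ interplay described above amounts to perturbing a secondary parameter set in proportion to rendering uncertainty and penalizing disagreement between the two models' renderings. The sketch below works on generic tensors; the noise scale and penalty weight are invented placeholders, not the paper's values, and real 3DGS parameters are structured rather than flat tensors.

```python
# Conceptual sketch only: perturb a Delta copy around the primary Sigma parameters in
# proportion to rendering uncertainty, then penalize disagreement between renderings.
import torch
import torch.nn.functional as F

@torch.no_grad()
def perturb_delta(delta_params, sigma_params, uncertainty: float, noise_scale: float = 1e-3):
    for d, s in zip(delta_params, sigma_params):
        d.copy_(s + noise_scale * uncertainty * torch.randn_like(s))

def discrepancy_penalty(sigma_render: torch.Tensor, delta_render: torch.Tensor,
                        weight: float = 0.1) -> torch.Tensor:
    # Disagreement between novel-view renderings of the two models.
    return weight * F.mse_loss(sigma_render, delta_render)
```
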
arXiv:2410.24203 [pdf, other] cs.CV cs.AI cs.GR cs.RO
DiffPano: Scalable and Consistent Text to Panorama Generation with Spherical Epipolar-Aware Diffusion
Authors: Weicai Ye, Chenhao Ji, Zheng Chen, Junyao Gao, Xiaoshui Huang, Song-Hai Zhang, Wanli Ouyang, Tong He, Cairong Zhao, Guofeng Zhang
Abstract: Diffusion-based methods have achieved remarkable success in 2D image or 3D object generation; however, the generation of 3D scenes and even $360^{\circ}$ images remains constrained, due to the limited number of scene datasets, the complexity of 3D scenes themselves, and the difficulty of generating consistent multi-view images. To address these issues, we first establish a large-scale panoramic video-text dataset containing millions of consecutive panoramic keyframes with corresponding panoramic depths, camera poses, and text descriptions. Then, we propose a novel text-driven panoramic generation framework, termed DiffPano, to achieve scalable, consistent, and diverse panoramic scene generation. Specifically, benefiting from the powerful generative capabilities of Stable Diffusion, we fine-tune a single-view text-to-panorama diffusion model with LoRA on the established panoramic video-text dataset. We further design a spherical epipolar-aware multi-view diffusion model to ensure the multi-view consistency of the generated panoramic images. Extensive experiments demonstrate that DiffPano can generate scalable, consistent, and diverse panoramic images with given unseen text descriptions and camera poses.
Submitted 31 October, 2024; originally announced October 2024.
Comments: NeurIPS2024, Project: https://github.com/zju3dv/DiffPano; Code: https://github.com/zju3dv/DiffPano

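The abstract mentions LoRA fine-tuning of a single-view text-to-panorama diffusion model. As a reminder of what a LoRA adapter is (independent of DiffPano's actual code), a low-rank update around a frozen linear layer looks like this; rank and scaling are illustrative.

```python
# Generic LoRA adapter: the pretrained weight stays frozen and a trainable low-rank
# update B @ A is added to its output.
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    def __init__(self, base: nn.Linear, rank: int = 8, alpha: float = 16.0):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad_(False)                      # freeze the pretrained layer
        self.A = nn.Parameter(torch.randn(rank, base.in_features) * 0.01)
        self.B = nn.Parameter(torch.zeros(base.out_features, rank))
        self.scale = alpha / rank

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.scale * (x @ self.A.T @ self.B.T)
```

In a setting like this one, such adapters would presumably wrap the attention and projection layers of the pretrained diffusion backbone, so that only A and B are updated during fine-tuning.
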
arXiv:2410.23745 [pdf, other] cs.LG cs.AI cs.DC cs.PF
Syno: Structured Synthesis for Neural Operators
Authors: Yongqi Zhuo, Zhengyuan Su, Chenggang Zhao, Mingyu Gao
Abstract: The desires for better prediction accuracy and higher execution performance in neural networks never end. Neural architecture search (NAS) and tensor compilers are two popular techniques to optimize these two goals, but they are both limited to composing or optimizing existing manually designed operators rather than coming up with completely new designs. In this work, we explore the less studied direction of neural operator synthesis, which aims to automatically and efficiently discover novel neural operators with better accuracy and/or speed. We develop an end-to-end framework, Syno, to realize practical neural operator synthesis. Syno makes use of a novel set of fine-grained primitives defined on tensor dimensions, which ensure various desired properties to ease model training, and also enable expression canonicalization techniques to avoid redundant candidates during search. Syno further adopts a novel guided synthesis flow to obtain valid operators matched with the specified input/output dimension sizes, and leverages efficient stochastic tree search algorithms to quickly explore the design space. We demonstrate that Syno discovers better operators with an average of $2.06\times$ speedup and less than $1\%$ accuracy loss, even on NAS-optimized models.
Submitted 31 October, 2024; originally announced October 2024.

arXiv:2410.20642 [pdf, other] cs.IR
Collaborative Knowledge Fusion: A Novel Approach for Multi-task Recommender Systems via LLMs
Authors: Chuang Zhao, Xing Su, Ming He, Hongke Zhao, Jianping Fan, Xiaomeng Li
Abstract: Owing to the impressive general intelligence of large language models (LLMs), there has been a growing trend to integrate them into recommender systems to gain a more profound insight into human interests and intentions. Existing LLM-based recommender systems primarily leverage item attributes and user interaction histories in textual format, improving a single task like rating prediction or explainable recommendation. Nevertheless, these approaches overlook the crucial contribution of traditional collaborative signals in discerning users' profound intentions and disregard the interrelatedness among tasks. To address these limitations, we introduce a novel framework known as CKF, specifically developed to boost multi-task recommendations via personalized collaborative knowledge fusion into LLMs. Specifically, our method synergizes traditional collaborative filtering models to produce collaborative embeddings, subsequently employing a meta-network to construct personalized mapping bridges tailored for each user. Once mapped, the embeddings are incorporated into meticulously designed prompt templates and then fed into an advanced LLM to represent user interests. To investigate the intrinsic relationship among diverse recommendation tasks, we develop Multi-Lora, a new parameter-efficient approach for multi-task optimization, adept at distinctly segregating task-shared and task-specific information. This method forges a connection between LLMs and recommendation scenarios, while simultaneously enriching the supervisory signal through mutual knowledge transfer among various tasks. Extensive experiments and in-depth robustness analyses across four common recommendation tasks on four large public datasets substantiate the effectiveness and superiority of our framework.
Submitted 27 October, 2024; originally announced October 2024.

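The "personalized mapping bridge" in the abstract suggests a small meta-network that turns a user's collaborative-filtering embedding into soft prompt vectors for the LLM. The sketch below is a guess at that shape; all dimensions and the number of soft tokens are assumptions, not the paper's configuration.

```python
# Guessed shape of a CF-to-soft-prompt mapping: a small MLP produces a few prompt vectors
# that would be prepended to the LLM's token embeddings for a given user.
import torch
import torch.nn as nn

class CFToSoftPrompt(nn.Module):
    def __init__(self, cf_dim: int = 64, llm_dim: int = 4096, n_tokens: int = 4):
        super().__init__()
        self.n_tokens = n_tokens
        self.map = nn.Sequential(nn.Linear(cf_dim, 512), nn.ReLU(),
                                 nn.Linear(512, n_tokens * llm_dim))

    def forward(self, cf_emb: torch.Tensor) -> torch.Tensor:    # (batch, cf_dim)
        out = self.map(cf_emb)
        return out.view(cf_emb.size(0), self.n_tokens, -1)      # (batch, n_tokens, llm_dim)
```
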
arXiv:2410.18228 [pdf] cs.CV
MsMorph: An Unsupervised pyramid learning network for brain image registration
Authors: Jiaofen Nan, Gaodeng Fan, Kaifan Zhang, Chen Zhao, Fubao Zhu, Weihua Zhou
Abstract: In the field of medical image analysis, image registration is a crucial technique. Despite the numerous registration models that have been proposed, existing methods still fall short in terms of accuracy and interpretability. In this paper, we present MsMorph, a deep learning-based image registration framework aimed at mimicking the manual process of registering image pairs to achieve more similar deformations, where the registered image pairs exhibit consistency or similarity in features. By extracting the feature differences between image pairs across various aspects using gradients, the framework decodes semantic information at different scales and continuously compensates for the predicted deformation field, driving the optimization of parameters to significantly improve registration accuracy. The proposed method simulates the manual approach to registration, focusing on different regions of the image pairs and their neighborhoods to predict the deformation field between the two images, which provides strong interpretability. We compared several existing registration methods on two public brain MRI datasets, including LPBA and Mindboggle. The experimental results show that our method consistently outperforms the state of the art in terms of metrics such as Dice score, Hausdorff distance, average symmetric surface distance, and non-Jacobian. The source code is publicly available at https://github.com/GaodengFan/MsMorph
Submitted 23 October, 2024; originally announced October 2024.
Comments: 18 pages, 10 figures

arXiv:2410.17526 [pdf, other] cs.LG
GDDA: Semantic OOD Detection on Graphs under Covariate Shift via Score-Based Diffusion Models
Authors: Zhixia He, Chen Zhao, Minglai Shao, Yujie Lin, Dong Li, Qin Tian
Abstract: Out-of-distribution (OOD) detection poses a significant challenge for Graph Neural Networks (GNNs), particularly in open-world scenarios with varying distribution shifts. Most existing OOD detection methods on graphs primarily focus on identifying instances in test data domains caused by either semantic shifts (changes in data classes) or covariate shifts (changes in data features), while leaving the simultaneous occurrence of both distribution shifts under-explored. In this work, we address both types of shifts simultaneously and introduce a novel challenge for OOD detection on graphs: graph-level semantic OOD detection under covariate shift. In this scenario, variations between the training and test domains result from the concurrent presence of both covariate and semantic shifts, where only graphs associated with unknown classes are identified as OOD samples (OODs). To tackle this challenge, we propose a novel two-phase framework called Graph Disentangled Diffusion Augmentation (GDDA). The first phase focuses on disentangling graph representations into domain-invariant semantic factors and domain-specific style factors. In the second phase, we introduce a novel distribution-shift-controlled score-based generative diffusion model that generates latent factors outside the training semantic and style spaces. Additionally, auxiliary pseudo-in-distribution (InD) and pseudo-OOD graph representations are employed to enhance the effectiveness of the energy-based semantic OOD detector. Extensive empirical studies on three benchmark datasets demonstrate that our approach outperforms state-of-the-art baselines.
Submitted 22 October, 2024; originally announced October 2024.
Comments: 4 pages, 6 figures

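The final stage above relies on an energy-based semantic OOD detector. A generic version of that detector, scoring a classifier's logits and thresholding at a validation-chosen quantile, is sketched below; the 95% in-distribution retention rule is an assumption, not the paper's calibration procedure.

```python
# Generic energy-score OOD detection: score each graph by the negative free energy of its
# classifier logits and flag low scores as OOD.
import torch

def energy_score(logits: torch.Tensor, temperature: float = 1.0) -> torch.Tensor:
    return temperature * torch.logsumexp(logits / temperature, dim=-1)

def fit_threshold(ind_logits: torch.Tensor, tpr: float = 0.95) -> float:
    # Choose the threshold so that roughly `tpr` of in-distribution scores stay above it.
    return torch.quantile(energy_score(ind_logits), 1.0 - tpr).item()

def is_ood(logits: torch.Tensor, threshold: float) -> torch.Tensor:
    return energy_score(logits) < threshold
```
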
arXiv:2410.17434 [pdf, other] cs.CV
LongVU: Spatiotemporal Adaptive Compression for Long Video-Language Understanding
Authors: Xiaoqian Shen, Yunyang Xiong, Changsheng Zhao, Lemeng Wu, Jun Chen, Chenchen Zhu, Zechun Liu, Fanyi Xiao, Balakrishnan Varadarajan, Florian Bordes, Zhuang Liu, Hu Xu, Hyunwoo J. Kim, Bilge Soran, Raghuraman Krishnamoorthi, Mohamed Elhoseiny, Vikas Chandra
Abstract: Multimodal Large Language Models (MLLMs) have shown promising progress in understanding and analyzing video content. However, processing long videos remains a significant challenge constrained by the LLM's context size. To address this limitation, we propose LongVU, a spatiotemporal adaptive compression mechanism that reduces the number of video tokens while preserving the visual details of long videos. Our idea is based on leveraging cross-modal query and inter-frame dependencies to adaptively reduce temporal and spatial redundancy in videos. Specifically, we leverage DINOv2 features to remove redundant frames that exhibit high similarity. Then we utilize text-guided cross-modal query for selective frame feature reduction. Further, we perform spatial token reduction across frames based on their temporal dependencies. Our adaptive compression strategy effectively processes a large number of frames with little visual information loss within a given context length. Our LongVU consistently surpasses existing methods across a variety of video understanding benchmarks, especially on hour-long video understanding tasks such as VideoMME and MLVU. Given a light-weight LLM, our LongVU also scales effectively to a smaller size with state-of-the-art video understanding performance.
Submitted 22 October, 2024; originally announced October 2024.
Comments: Project page: https://vision-cair.github.io/LongVU

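The first compression step described above, dropping frames whose DINOv2 features are nearly identical to the previously kept frame, reduces to a simple greedy filter. The sketch below assumes pooled per-frame features are already available; the 0.9 similarity threshold is illustrative, not the paper's setting.

```python
# Greedy frame pruning: keep a frame only if its feature differs enough from the last
# kept frame. Feature extraction (DINOv2 in the paper) is abstracted away.
import torch
import torch.nn.functional as F

def prune_redundant_frames(frame_feats: torch.Tensor, sim_thresh: float = 0.9) -> list:
    # frame_feats: (T, D) pooled per-frame features in temporal order.
    kept = [0]
    for t in range(1, frame_feats.shape[0]):
        sim = F.cosine_similarity(frame_feats[t], frame_feats[kept[-1]], dim=0)
        if sim < sim_thresh:
            kept.append(t)
    return kept
```
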
arXiv:2410.16670 [pdf, other] cs.LG cs.AI
CoPS: Empowering LLM Agents with Provable Cross-Task Experience Sharing
Authors: Chen Yang, Chenyang Zhao, Quanquan Gu, Dongruo Zhou
Abstract: Sequential reasoning in agent systems has been significantly advanced by large language models (LLMs), yet existing approaches face limitations. Reflection-driven reasoning relies solely on knowledge in pretrained models, limiting performance in novel scenarios, while experience-assisted reasoning often depends on external experiences and lacks clear principles for selecting representative experiences. We address these limitations by proposing CoPS (Cross-Task Experience Sharing), a generalizable algorithm that enhances sequential reasoning by cross-task experience sharing and selection. In detail, CoPS leverages agents' experiences on previous tasks, selecting distribution-matched experiences via a provable pessimism-based strategy to maximize utility while minimizing risks from distribution shifts. Extensive experimental results on benchmarks like Alfworld, Webshop, and HotPotQA demonstrate that CoPS consistently outperforms state-of-the-art baselines, with superior sample efficiency suitable for resource-constrained scenarios. Theoretically, we show that the performance of our algorithm depends on both the quality of the pretrained LLM and the matching between the agent's task-dependent trial distribution and that generated by the LLM. Our work bridges the gap between existing sequential reasoning paradigms and validates the effectiveness of leveraging cross-task experiences, shedding light on the potential to improve agents' generalization and adaptability across diverse tasks. Our codes are available at https://github.com/uclaml/COPS.
Submitted 21 October, 2024; originally announced October 2024.
Comments: 25 pages, 5 tables, 3 figures

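The selection rule in CoPS is described as distribution-matched and pessimism-based; a very rough stand-in is to rank stored experiences by embedding similarity to the current task minus an uncertainty penalty, as sketched below. The count-based penalty is an invented proxy, not the paper's provable strategy.

```python
# Rough stand-in for pessimism-flavored experience selection over an experience bank.
import numpy as np

def select_experiences(task_emb: np.ndarray, bank_embs: np.ndarray,
                       bank_counts: np.ndarray, k: int = 4, beta: float = 0.5) -> np.ndarray:
    sims = bank_embs @ task_emb / (
        np.linalg.norm(bank_embs, axis=1) * np.linalg.norm(task_emb) + 1e-8)
    pessimism = beta / np.sqrt(bank_counts + 1.0)     # larger penalty for unproven items
    return np.argsort(sims - pessimism)[::-1][:k]     # indices of the top-k experiences
```
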
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages, 5 tables, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15959">arXiv:2410.15959</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.15959">pdf</a>, <a href="https://arxiv.org/format/2410.15959">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Diffusion Transformer Policy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hou%2C+Z">Zhi Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+T">Tianyi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+Y">Yuwen Xiong</a>, <a href="/search/cs?searchtype=author&amp;query=Pu%2C+H">Hengjun Pu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chengyang Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+R">Ronglei Tong</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+Y">Yu Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+J">Jifeng Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yuntao Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15959v1-abstract-short" style="display: inline;"> Recent large visual-language action models pretrained on diverse robot datasets have demonstrated the potential for generalizing to new environments with a few in-domain data. However, those approaches usually predict discretized or continuous actions by a small action head, which limits the ability in handling diverse action spaces. In contrast, we model the continuous action with a large multi-m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15959v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15959v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15959v1-abstract-full" style="display: none;"> Recent large visual-language action models pretrained on diverse robot datasets have demonstrated the potential for generalizing to new environments with a few in-domain data. However, those approaches usually predict discretized or continuous actions by a small action head, which limits the ability in handling diverse action spaces. In contrast, we model the continuous action with a large multi-modal diffusion transformer, dubbed as Diffusion Transformer Policy, in which we directly denoise action chunks by a large transformer model rather than a small action head. By leveraging the scaling capability of transformers, the proposed approach can effectively model continuous end-effector actions across large diverse robot datasets, and achieve better generalization performance. 
Extensive experiments demonstrate that Diffusion Transformer Policy pretrained on diverse robot data can generalize to different embodiments, including simulation environments like Maniskill2 and Calvin, as well as the real-world Franka arm. Specifically, without bells and whistles, the proposed approach achieves state-of-the-art performance with only a single third-view camera stream in the Calvin novel task setting (ABC-&gt;D), improving the average number of tasks completed in a row (out of 5) to 3.6, and the pretraining stage significantly increases the success sequence length on Calvin by over 1.2. The code will be publicly available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15959v1-abstract-full').style.display = 'none'; document.getElementById('2410.15959v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15885">arXiv:2410.15885</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.15885">pdf</a>, <a href="https://arxiv.org/format/2410.15885">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> How to Build a Pre-trained Multimodal model for Simultaneously Chatting and Decision-making? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Z">Zuojin Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+B">Bin Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chenyang Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+D">De Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+G">Gang Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+B">Bin Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15885v1-abstract-short" style="display: inline;"> Existing large pre-trained models typically map text input to text output in an end-to-end manner, such as ChatGPT, or map a segment of text input to a hierarchy of action decisions, such as OpenVLA. However, humans can simultaneously generate text and actions when receiving specific input signals. For example, a driver can make precise driving decisions while conversing with a friend in the passe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15885v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15885v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15885v1-abstract-full" style="display: none;"> Existing large pre-trained models typically map text input to text output in an end-to-end manner, such as ChatGPT, or map a segment of text input to a hierarchy of action decisions, such as OpenVLA.
However, humans can simultaneously generate text and actions when receiving specific input signals. For example, a driver can make precise driving decisions while conversing with a friend in the passenger seat. Motivated by this observation, we consider the following question in this work: is it possible to construct a pre-trained model that can provide both language interaction and precise decision-making capabilities in dynamic open scenarios? We provide a definitive answer to this question by developing a new model architecture termed Visual Language Action model for Chatting and Decision Making (VLA4CD), and further demonstrating its performance in challenging autonomous driving tasks. Specifically, we leverage LoRA to fine-tune a pre-trained LLM with data of multiple modalities covering language, vision, and action. Unlike the existing LoRA operations used for LLM fine-tuning, we have designed new computational modules and training cost functions for VLA4CD. These designs enable VLA4CD to provide continuous-valued action decisions while outputting text responses. In contrast, existing LLMs can only output text responses, and current VLA models can only output action decisions. Moreover, these VLA models handle action data by discretizing and then tokenizing the discretized actions, a method unsuitable for complex decision-making tasks involving high-dimensional continuous-valued action vectors, such as autonomous driving. The experimental results on CARLA validate that: (1) our proposed model construction method is effective; (2) compared to the SOTA VLA model, VLA4CD can provide more accurate real-time decision-making while retaining the text interaction capability inherent to LLMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15885v1-abstract-full').style.display = 'none'; document.getElementById('2410.15885v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024.
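The abstract does not spell out VLA4CD's new modules or training cost functions; purely as an illustrative sketch under assumed shapes and an assumed 1:1 loss weighting, a dual-output head that pairs text logits with continuous action values on top of a shared (e.g. LoRA-tuned) LLM backbone could look like this:
<pre><code>
import torch
import torch.nn as nn
import torch.nn.functional as F

class DualOutputHead(nn.Module):
    """Illustrative head pairing text logits with continuous action values,
    applied to hidden states from a (hypothetical) LoRA-tuned LLM backbone."""
    def __init__(self, hidden_dim, vocab_size, action_dim):
        super().__init__()
        self.lm_head = nn.Linear(hidden_dim, vocab_size)      # text branch
        self.action_head = nn.Sequential(                     # continuous-action branch
            nn.Linear(hidden_dim, hidden_dim), nn.GELU(),
            nn.Linear(hidden_dim, action_dim),
        )

    def forward(self, hidden_states, text_targets=None, action_targets=None):
        # hidden_states: (batch, seq_len, hidden_dim) from the shared backbone
        text_logits = self.lm_head(hidden_states)
        actions = self.action_head(hidden_states[:, -1])       # act from the last token
        loss = None
        if text_targets is not None and action_targets is not None:
            ce = F.cross_entropy(text_logits.flatten(0, 1), text_targets.flatten())
            mse = F.mse_loss(actions, action_targets)
            loss = ce + mse                                    # assumed equal weighting
        return text_logits, actions, loss
</code></pre>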
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11046">arXiv:2410.11046</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.11046">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> SGUQ: Staged Graph Convolution Neural Network for Alzheimer&#39;s Disease Diagnosis using Multi-Omics Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tao%2C+L">Liang Tao</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Y">Yixin Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+J+D">Jeffrey D Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+H">Hui Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+H">Hong-Wen Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+W">Weihua Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chen Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11046v1-abstract-short" style="display: inline;"> Alzheimer&#39;s disease (AD) is a chronic neurodegenerative disorder and the leading cause of dementia, significantly impacting cost, mortality, and burden worldwide. The advent of high-throughput omics technologies, such as genomics, transcriptomics, proteomics, and epigenomics, has revolutionized the molecular understanding of AD. Conventional AI approaches typically require the completion of all om&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11046v1-abstract-full').style.display = 'inline'; document.getElementById('2410.11046v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11046v1-abstract-full" style="display: none;"> Alzheimer&#39;s disease (AD) is a chronic neurodegenerative disorder and the leading cause of dementia, significantly impacting cost, mortality, and burden worldwide. The advent of high-throughput omics technologies, such as genomics, transcriptomics, proteomics, and epigenomics, has revolutionized the molecular understanding of AD. Conventional AI approaches typically require the completion of all omics data at the outset to achieve optimal AD diagnosis, which are inefficient and may be unnecessary. To reduce the clinical cost and improve the accuracy of AD diagnosis using multi-omics data, we propose a novel staged graph convolutional network with uncertainty quantification (SGUQ). SGUQ begins with mRNA and progressively incorporates DNA methylation and miRNA data only when necessary, reducing overall costs and exposure to harmful tests. Experimental results indicate that 46.23% of the samples can be reliably predicted using only single-modal omics data (mRNA), while an additional 16.04% of the samples can achieve reliable predictions when combining two omics data types (mRNA + DNA methylation). 
In addition, the proposed staged SGUQ achieved an accuracy of 0.858 on the ROSMAP dataset, which outperformed existing methods significantly. The proposed SGUQ can not only be applied to AD diagnosis using multi-omics data but also has the potential for clinical decision-making using multi-viewed data. Our implementation is publicly available at https://github.com/chenzhao2023/multiomicsuncertainty. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11046v1-abstract-full').style.display = 'none'; document.getElementById('2410.11046v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.10934">arXiv:2410.10934</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.10934">pdf</a>, <a href="https://arxiv.org/format/2410.10934">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Agent-as-a-Judge: Evaluate Agents with Agents </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhuge%2C+M">Mingchen Zhuge</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Changsheng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Ashley%2C+D">Dylan Ashley</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Wenyi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Khizbullin%2C+D">Dmitrii Khizbullin</a>, <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+Y">Yunyang Xiong</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zechun Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+E">Ernie Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Krishnamoorthi%2C+R">Raghuraman Krishnamoorthi</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+Y">Yuandong Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yangyang Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Chandra%2C+V">Vikas Chandra</a>, <a href="/search/cs?searchtype=author&amp;query=Schmidhuber%2C+J">Jürgen Schmidhuber</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.10934v2-abstract-short" style="display: inline;"> Contemporary evaluation techniques are inadequate for agentic systems. These approaches either focus exclusively on final outcomes -- ignoring the step-by-step nature of agentic systems, or require excessive manual labour. To address this, we introduce the Agent-as-a-Judge framework, wherein agentic systems are used to evaluate agentic systems.
This is an organic extension of the LLM-as-a-Judge fr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10934v2-abstract-full').style.display = 'inline'; document.getElementById('2410.10934v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.10934v2-abstract-full" style="display: none;"> Contemporary evaluation techniques are inadequate for agentic systems. These approaches either focus exclusively on final outcomes -- ignoring the step-by-step nature of agentic systems, or require excessive manual labour. To address this, we introduce the Agent-as-a-Judge framework, wherein agentic systems are used to evaluate agentic systems. This is an organic extension of the LLM-as-a-Judge framework, incorporating agentic features that enable intermediate feedback for the entire task-solving process. We apply the Agent-as-a-Judge to the task of code generation. To overcome issues with existing benchmarks and provide a proof-of-concept testbed for Agent-as-a-Judge, we present DevAI, a new benchmark of 55 realistic automated AI development tasks. It includes rich manual annotations, like a total of 365 hierarchical user requirements. We benchmark three of the popular agentic systems using Agent-as-a-Judge and find it dramatically outperforms LLM-as-a-Judge and is as reliable as our human evaluation baseline. Altogether, we believe that Agent-as-a-Judge marks a concrete step forward for modern agentic systems -- by providing rich and reliable reward signals necessary for dynamic and scalable self-improvement. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10934v2-abstract-full').style.display = 'none'; document.getElementById('2410.10934v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The project can be found at https://github.com/metauto-ai/agent-as-a-judge. 
The dataset is released at https://huggingface.co/DEVAI-benchmark</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.06534">arXiv:2410.06534</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.06534">pdf</a>, <a href="https://arxiv.org/format/2410.06534">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> EEG-estimated functional connectivity, and not behavior, differentiates Parkinson&#39;s patients from health controls during the Simon conflict task </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xiaoxiao Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chongkun Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Koorathota%2C+S">Sharath Koorathota</a>, <a href="/search/cs?searchtype=author&amp;query=Sajda%2C+P">Paul Sajda</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.06534v1-abstract-short" style="display: inline;"> Neural biomarkers that can classify or predict disease are of broad interest to the neurological and psychiatric communities. Such biomarkers can be informative of disease state or treatment efficacy, even before there are changes in symptoms and/or behavior. This work investigates EEG-estimated functional connectivity (FC) as a Parkinson&#39;s Disease (PD) biomarker. Specifically, we investigate FC m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06534v1-abstract-full').style.display = 'inline'; document.getElementById('2410.06534v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.06534v1-abstract-full" style="display: none;"> Neural biomarkers that can classify or predict disease are of broad interest to the neurological and psychiatric communities. Such biomarkers can be informative of disease state or treatment efficacy, even before there are changes in symptoms and/or behavior. This work investigates EEG-estimated functional connectivity (FC) as a Parkinson&#39;s Disease (PD) biomarker. Specifically, we investigate FC mediated via neural oscillations and consider such activity during the Simons conflict task. This task yields sensory-motor conflict, and one might expect differences in behavior between PD patients and healthy controls (HCs). In addition to considering spatially focused approaches, such as FC, as a biomarker, we also consider temporal biomarkers, which are more sensitive to ongoing changes in neural activity. We find that FC, estimated from delta (1-4Hz) and theta (4-7Hz) oscillations, yields spatial FC patterns significantly better at distinguishing PD from HC than temporal features or behavior. This study reinforces that FC in spectral bands is informative of differences in brain-wide processes and can serve as a biomarker distinguishing normal brain function from that seen in disease. 
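The abstract does not state which connectivity estimator was used; as a minimal stand-in sketch (the Butterworth filter and Pearson correlation below are assumptions, not the study's pipeline), band-limited functional connectivity features for the delta and theta bands can be computed like this:
<pre><code>
import numpy as np
from scipy.signal import butter, filtfilt

def band_fc(eeg, fs, band):
    """eeg: (channels, samples) array; returns a channel-by-channel connectivity
    matrix from Pearson correlation of the band-passed signals."""
    low, high = band
    b, a = butter(4, [low / (fs / 2), high / (fs / 2)], btype="band")
    filtered = filtfilt(b, a, eeg, axis=1)
    return np.corrcoef(filtered)

# Example: delta (1-4 Hz) and theta (4-7 Hz) FC features for one synthetic recording.
rng = np.random.default_rng(0)
eeg = rng.standard_normal((32, 10 * 256))          # 32 channels, 10 s at 256 Hz
fc_delta = band_fc(eeg, fs=256, band=(1, 4))
fc_theta = band_fc(eeg, fs=256, band=(4, 7))
features = np.concatenate([fc_delta.ravel(), fc_theta.ravel()])  # classifier input
</code></pre>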
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06534v1-abstract-full').style.display = 'none'; document.getElementById('2410.06534v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work is accepted at IEEE EMBC 2024. Personal use is permitted, but republication/redistribution requires IEEE permission. See http://www.ieee.org/publications standards/publications/rights/index.html for more information</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.04342">arXiv:2410.04342</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.04342">pdf</a>, <a href="https://arxiv.org/format/2410.04342">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Accelerating Inference of Networks in the Frequency Domain </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chenqiu Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+G">Guanfang Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Basu%2C+A">Anup Basu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.04342v1-abstract-short" style="display: inline;"> It has been demonstrated that networks&#39; parameters can be significantly reduced in the frequency domain with a very small decrease in accuracy. However, given the cost of frequency transforms, the computational complexity is not significantly decreased. In this work, we propose performing network inference in the frequency domain to speed up networks whose frequency parameters are sparse. In parti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04342v1-abstract-full').style.display = 'inline'; document.getElementById('2410.04342v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.04342v1-abstract-full" style="display: none;"> It has been demonstrated that networks&#39; parameters can be significantly reduced in the frequency domain with a very small decrease in accuracy. However, given the cost of frequency transforms, the computational complexity is not significantly decreased. In this work, we propose performing network inference in the frequency domain to speed up networks whose frequency parameters are sparse. In particular, we propose a frequency inference chain that is dual to the network inference in the spatial domain. In order to handle the non-linear layers, we make a compromise to apply non-linear operations on frequency data directly, which works effectively. Enabled by the frequency inference chain and the strategy for non-linear layers, the proposed approach completes the entire inference in the frequency domain. 
Unlike previous approaches which require extra frequency or inverse transforms for all layers, the proposed approach only needs the frequency transform and its inverse once at the beginning and once at the end of a network. Comparisons with state-of-the-art methods demonstrate that the proposed approach significantly improves accuracy in the case of a high speedup ratio (over 100x). The source code is available at \url{https://github.com/guanfangdong/FreqNet-Infer}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04342v1-abstract-full').style.display = 'none'; document.getElementById('2410.04342v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted by ACM Multimedia Asia 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.04232">arXiv:2410.04232</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.04232">pdf</a>, <a href="https://arxiv.org/format/2410.04232">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Be There, Be Together, Be Streamed! AR Scenic Live-Streaming for an Interactive and Collective Experience </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Z">Zeyu Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Z">Zuyu Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuanhao Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+C">Chengzhong Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yanwei Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+C">Chuhan Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+J+C">Jason Chen Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+X">Xiaojuan Ma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.04232v1-abstract-short" style="display: inline;"> Scenic Live-Streaming (SLS), capturing real-world scenic sites from fixed cameras without streamers, combines scene immersion and the social and real-time characteristics of live-streaming into a unique experience. However, existing SLS affords limited audience interactions to engage them in a collective experience compared to many other live-streaming genres. 
It is also difficult for SLS to recre&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04232v1-abstract-full').style.display = 'inline'; document.getElementById('2410.04232v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.04232v1-abstract-full" style="display: none;"> Scenic Live-Streaming (SLS), capturing real-world scenic sites from fixed cameras without streamers, combines scene immersion and the social and real-time characteristics of live-streaming into a unique experience. However, existing SLS affords limited audience interactions to engage them in a collective experience compared to many other live-streaming genres. It is also difficult for SLS to recreate important but intangible constituents of in-person trip experiences, such as cultural activities. To offer a more interactive, engaging, and meaningful experience, we propose ARSLS (Augmented Reality Scenic Live-Streaming). Culturally grounded AR objects with awareness of the live-streamed environment can be overlaid over camera views to provide additional interactive features while maintaining consistency with the live-streamed scene. To explore the design space of this new medium, we developed an ARSLS prototype for a famous landscape in China. A preliminary study (N=15) provided initial insights for ARSLS design. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04232v1-abstract-full').style.display = 'none'; document.getElementById('2410.04232v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 2 figures, to appear in the adjunct proceedings of ISMAR 2024 and the ISMAR 2024 conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.03083">arXiv:2410.03083</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.03083">pdf</a>, <a href="https://arxiv.org/format/2410.03083">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Scaling Parameter-Constrained Language Models with Quality Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chang%2C+E">Ernie Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Paltenghi%2C+M">Matteo Paltenghi</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+P">Pin-Jie Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Changsheng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Huber%2C+P">Patrick Huber</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zechun Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Rabatin%2C+R">Rastislav Rabatin</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yangyang Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Chandra%2C+V">Vikas Chandra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.03083v1-abstract-short" style="display: inline;"> Scaling laws in language modeling traditionally quantify training loss as a function of dataset size and model parameters, providing compute-optimal estimates but often neglecting the impact of data quality on model generalization. In this paper, we extend the conventional understanding of scaling law by offering a microscopic view of data quality within the original formulation -- effective train&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03083v1-abstract-full').style.display = 'inline'; document.getElementById('2410.03083v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.03083v1-abstract-full" style="display: none;"> Scaling laws in language modeling traditionally quantify training loss as a function of dataset size and model parameters, providing compute-optimal estimates but often neglecting the impact of data quality on model generalization. In this paper, we extend the conventional understanding of scaling law by offering a microscopic view of data quality within the original formulation -- effective training tokens -- which we posit to be a critical determinant of performance for parameter-constrained language models. Specifically, we formulate the proposed term of effective training tokens to be a combination of two readily-computed indicators of text: (i) text diversity and (ii) syntheticity as measured by a teacher model. 
We pretrained over $200$ models of 25M to 1.5B parameters on a diverse set of sampled, synthetic data, and estimated the constants that relate text quality, model size, training tokens, and eight reasoning task accuracy scores. We demonstrated the estimated constants yield +0.83 Pearson correlation with true accuracies, and analyzed it in scenarios involving widely-used data techniques such as data sampling and synthesis which aim to improve data quality. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03083v1-abstract-full').style.display = 'none'; document.getElementById('2410.03083v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to EMNLP 2024 Industry Track, 18 pages, 9 figures, 4 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.20558">arXiv:2409.20558</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.20558">pdf</a>, <a href="https://arxiv.org/format/2409.20558">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Uni$^2$Det: Unified and Universal Framework for Prompt-Guided Multi-dataset 3D Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yubin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+Z">Zhikang Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+X">Xiaoqing Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+X">Xiao Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+E">Errui Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Cairong Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.20558v1-abstract-short" style="display: inline;"> We present Uni$^2$Det, a brand new framework for unified and universal multi-dataset training on 3D detection, enabling robust performance across diverse domains and generalization to unseen domains. Due to substantial disparities in data distribution and variations in taxonomy across diverse domains, training such a detector by simply merging datasets poses a significant challenge. Motivated by t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20558v1-abstract-full').style.display = 'inline'; document.getElementById('2409.20558v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.20558v1-abstract-full" style="display: none;"> We present Uni$^2$Det, a brand new framework for unified and universal multi-dataset training on 3D detection, enabling robust performance across diverse domains and generalization to unseen domains. 
Due to substantial disparities in data distribution and variations in taxonomy across diverse domains, training such a detector by simply merging datasets poses a significant challenge. Motivated by this observation, we introduce multi-stage prompting modules for multi-dataset 3D detection, which leverages prompts based on the characteristics of corresponding datasets to mitigate existing differences. This elegant design facilitates seamless plug-and-play integration within various advanced 3D detection frameworks in a unified manner, while also allowing straightforward adaptation for universal applicability across datasets. Experiments are conducted across multiple dataset consolidation scenarios involving KITTI, Waymo, and nuScenes, demonstrating that our Uni$^2$Det outperforms existing methods by a large margin in multi-dataset training. Notably, results on zero-shot cross-dataset transfer validate the generalization capability of our proposed method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20558v1-abstract-full').style.display = 'none'; document.getElementById('2409.20558v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 5 figures, 6 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.19624">arXiv:2409.19624</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.19624">pdf</a>, <a href="https://arxiv.org/format/2409.19624">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Storynizor: Consistent Story Generation via Inter-Frame Synchronized and Shuffled ID Injection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Y">Yuhang Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+W">Wenting Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chaoyi Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+K">Keqiang Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+Q">Qinfeng Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Z">Zeng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+C">Changjie Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Z">Zhipeng Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.19624v1-abstract-short" style="display: inline;"> Recent advances in text-to-image diffusion models have spurred significant interest in continuous story image generation. In this paper, we introduce Storynizor, a model capable of generating coherent stories with strong inter-frame character consistency, effective foreground-background separation, and diverse pose variation. 
The core innovation of Storynizor lies in its key modules: ID-Synchroniz&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19624v1-abstract-full').style.display = 'inline'; document.getElementById('2409.19624v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.19624v1-abstract-full" style="display: none;"> Recent advances in text-to-image diffusion models have spurred significant interest in continuous story image generation. In this paper, we introduce Storynizor, a model capable of generating coherent stories with strong inter-frame character consistency, effective foreground-background separation, and diverse pose variation. The core innovation of Storynizor lies in its key modules: ID-Synchronizer and ID-Injector. The ID-Synchronizer employs an auto-mask self-attention module and a mask perceptual loss across inter-frame images to improve the consistency of character generation, vividly representing their postures and backgrounds. The ID-Injector utilizes a Shuffling Reference Strategy (SRS) to integrate ID features into specific locations, enhancing ID-based consistent character generation. Additionally, to facilitate the training of Storynizor, we have curated a novel dataset called StoryDB comprising 100,000 images. This dataset contains single- and multiple-character sets in diverse environments, layouts, and gestures with detailed descriptions. Experimental results indicate that Storynizor demonstrates superior coherent story generation with high-fidelity character consistency, flexible postures, and vivid backgrounds compared to other character-specific methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19624v1-abstract-full').style.display = 'none'; document.getElementById('2409.19624v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.17795">arXiv:2409.17795</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.17795">pdf</a>, <a href="https://arxiv.org/format/2409.17795">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> </div> </div> <p class="title is-5 mathjax"> Physics-driven complex relaxation for multi-body systems of SPH method </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chenxi Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+Y">Yongchuan Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Haidn%2C+O+J">Oskar J. Haidn</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+X">Xiangyu Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.17795v1-abstract-short" style="display: inline;"> In the smoothed particle dynamics (SPH) method, the characteristics of a target particle are interpolated based on the information from its neighboring particles.
Consequently, a uniform initial distribution of particles significantly enhances the accuracy of SPH calculations. This aspect is particularly critical in Eulerian SPH, where particles are stationary throughout the simulation. To address&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17795v1-abstract-full').style.display = 'inline'; document.getElementById('2409.17795v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.17795v1-abstract-full" style="display: none;"> In the smoothed particle dynamics (SPH) method, the characteristics of a target particle are interpolated based on the information from its neighboring particles. Consequently, a uniform initial distribution of particles significantly enhances the accuracy of SPH calculations. This aspect is particularly critical in Eulerian SPH, where particles are stationary throughout the simulation. To address this, we introduce a physics-driven complex relaxation method for multi-body systems. Through a series of two-dimensional and three-dimensional case studies, we demonstrate that this method is capable of achieving a globally uniform particle distribution, especially at the interfaces between contacting bodies, and ensuring improved zero-order consistency. Moreover, the effectiveness and reliability of the complex relaxation method in enhancing the accuracy of physical simulations are further validated. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.17795v1-abstract-full').style.display = 'none'; document.getElementById('2409.17795v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">38 pages and 25 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16682">arXiv:2409.16682</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.16682">pdf</a>, <a href="https://arxiv.org/format/2409.16682">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> SynTQA: Synergistic Table-based Question Answering via Mixture of Text-to-SQL and E2E TQA </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+S">Siyue Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Luu%2C+A+T">Anh Tuan Luu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chen Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16682v2-abstract-short" style="display: inline;"> Text-to-SQL parsing and end-to-end question answering (E2E TQA) are two main approaches for Table-based Question Answering task. Despite success on multiple benchmarks, they have yet to be compared and their synergy remains unexplored. 
In this paper, we identify different strengths and weaknesses through evaluating state-of-the-art models on benchmark datasets: Text-to-SQL demonstrates superiority&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16682v2-abstract-full').style.display = 'inline'; document.getElementById('2409.16682v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16682v2-abstract-full" style="display: none;"> Text-to-SQL parsing and end-to-end question answering (E2E TQA) are two main approaches for Table-based Question Answering task. Despite success on multiple benchmarks, they have yet to be compared and their synergy remains unexplored. In this paper, we identify different strengths and weaknesses through evaluating state-of-the-art models on benchmark datasets: Text-to-SQL demonstrates superiority in handling questions involving arithmetic operations and long tables; E2E TQA excels in addressing ambiguous questions, non-standard table schema, and complex table contents. To combine both strengths, we propose a Synergistic Table-based Question Answering approach that integrates different models via answer selection, which is agnostic to model type. Further experiments validate that ensembling models with either a feature-based or an LLM-based answer selector significantly improves performance over individual models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16682v2-abstract-full').style.display = 'none'; document.getElementById('2409.16682v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024.
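As a schematic of the answer-selection idea only, the helper below routes between a Text-to-SQL answer and an end-to-end TQA answer using a fitted binary classifier; the feature set and the selector object are placeholders, not the paper's actual features or model:
<pre><code>
def select_answer(question, sql_answer, e2e_answer, selector):
    """Schematic feature-based answer selection between a Text-to-SQL answer
    and an end-to-end TQA answer. `selector` is any fitted binary classifier
    (e.g. sklearn) that returns 1 when the SQL answer should be preferred."""
    feats = [
        float(sql_answer is not None),                # SQL execution succeeded
        float(str(sql_answer) == str(e2e_answer)),    # the two systems agree
        float(any(ch.isdigit() for ch in question)),  # crude arithmetic signal
        float(len(question.split())),                 # question length
    ]
    prefer_sql = selector.predict([feats])[0] == 1
    return sql_answer if prefer_sql else e2e_answer
</code></pre>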
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">EMNLP 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16280">arXiv:2409.16280</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.16280">pdf</a>, <a href="https://arxiv.org/format/2409.16280">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MonoFormer: One Transformer for Both Diffusion and Autoregression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chuyang Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Y">Yuxing Song</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Wenhao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+H">Haocheng Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+E">Errui Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Y">Yifan Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+X">Xinyan Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jingdong Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16280v1-abstract-short" style="display: inline;"> Most existing multimodality methods use separate backbones for autoregression-based discrete text generation and diffusion-based continuous visual generation, or the same backbone by discretizing the visual data to use autoregression for both text and visual generation. In this paper, we propose to study a simple idea: share one transformer for both autoregression and diffusion. The feasibility co&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16280v1-abstract-full').style.display = 'inline'; document.getElementById('2409.16280v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16280v1-abstract-full" style="display: none;"> Most existing multimodality methods use separate backbones for autoregression-based discrete text generation and diffusion-based continuous visual generation, or the same backbone by discretizing the visual data to use autoregression for both text and visual generation. In this paper, we propose to study a simple idea: share one transformer for both autoregression and diffusion. The feasibility comes from two main aspects: (i) Transformer is successfully applied to diffusion for visual generation, and (ii) transformer training for autoregression and diffusion is very similar, and the difference merely lies in that diffusion uses bidirectional attention mask and autoregression uses causal attention mask. Experimental results show that our approach achieves comparable image generation performance to current state-of-the-art methods as well as maintains the text generation capability. The project is publicly available at https://monoformer.github.io/. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16280v1-abstract-full').style.display = 'none'; document.getElementById('2409.16280v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.15750">arXiv:2409.15750</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.15750">pdf</a>, <a href="https://arxiv.org/format/2409.15750">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> </div> </div> <p class="title is-5 mathjax"> The Roles of Generative Artificial Intelligence in Internet of Electric Vehicles </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hanwen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Niyato%2C+D">Dusit Niyato</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Wei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Changyuan Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+H">Hongyang Du</a>, <a href="/search/cs?searchtype=author&amp;query=Jamalipour%2C+A">Abbas Jamalipour</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+S">Sumei Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Pei%2C+Y">Yiyang Pei</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.15750v3-abstract-short" style="display: inline;"> With the advancements of generative artificial intelligence (GenAI) models, their capabilities are expanding significantly beyond content generation and the models are increasingly being used across diverse applications. Particularly, GenAI shows great potential in addressing challenges in the electric vehicle (EV) ecosystem ranging from charging management to cyber-attack prevention. In this pape&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.15750v3-abstract-full').style.display = 'inline'; document.getElementById('2409.15750v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.15750v3-abstract-full" style="display: none;"> With the advancements of generative artificial intelligence (GenAI) models, their capabilities are expanding significantly beyond content generation and the models are increasingly being used across diverse applications. Particularly, GenAI shows great potential in addressing challenges in the electric vehicle (EV) ecosystem ranging from charging management to cyber-attack prevention. In this paper, we specifically consider Internet of electric vehicles (IoEV) and we categorize GenAI for IoEV into four different layers namely, EV&#39;s battery layer, individual EV layer, smart grid layer, and security layer. 
We introduce various GenAI techniques used in each layer of IoEV applications. Subsequently, public datasets available for training the GenAI models are summarized. Finally, we provide recommendations for future directions. This survey not only categorizes the applications of GenAI in IoEV across different layers but also serves as a valuable resource for researchers and practitioners by highlighting the design and implementation challenges within each layer. Furthermore, it provides a roadmap for future research directions, enabling the development of more robust and efficient IoEV systems through the integration of advanced GenAI techniques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.15750v3-abstract-full').style.display = 'none'; document.getElementById('2409.15750v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 Pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.14827">arXiv:2409.14827</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.14827">pdf</a>, <a href="https://arxiv.org/format/2409.14827">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> AIM 2024 Challenge on Video Saliency Prediction: Methods and Results </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Moskalenko%2C+A">Andrey Moskalenko</a>, <a href="/search/cs?searchtype=author&amp;query=Bryncev%2C+A">Alexey Bryncev</a>, <a href="/search/cs?searchtype=author&amp;query=Vatolin%2C+D">Dmitry Vatolin</a>, <a href="/search/cs?searchtype=author&amp;query=Timofte%2C+R">Radu Timofte</a>, <a href="/search/cs?searchtype=author&amp;query=Zhan%2C+G">Gen Zhan</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+L">Li Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yunlong Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Liao%2C+Y">Yiting Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+J">Jiongzhi Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+B">Baitao Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Moradi%2C+M">Morteza Moradi</a>, <a href="/search/cs?searchtype=author&amp;query=Moradi%2C+M">Mohammad Moradi</a>, <a href="/search/cs?searchtype=author&amp;query=Rundo%2C+F">Francesco Rundo</a>, <a href="/search/cs?searchtype=author&amp;query=Spampinato%2C+C">Concetto Spampinato</a>, <a href="/search/cs?searchtype=author&amp;query=Borji%2C+A">Ali Borji</a>, <a href="/search/cs?searchtype=author&amp;query=Palazzo%2C+S">Simone Palazzo</a>, <a 
href="/search/cs?searchtype=author&amp;query=Zhu%2C+Y">Yuxin Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Y">Yinan Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+H">Huiyu Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+Y">Yuqin Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+Z">Ziheng Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Q">Qiang Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Min%2C+X">Xiongkuo Min</a>, <a href="/search/cs?searchtype=author&amp;query=Zhai%2C+G">Guangtao Zhai</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+H">Hao Fang</a> , et al. (8 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.14827v1-abstract-short" style="display: inline;"> This paper reviews the Challenge on Video Saliency Prediction at AIM 2024. The goal of the participants was to develop a method for predicting accurate saliency maps for the provided set of video sequences. Saliency maps are widely exploited in various applications, including video compression, quality assessment, visual perception studies, the advertising industry, etc. For this competition, a pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14827v1-abstract-full').style.display = 'inline'; document.getElementById('2409.14827v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.14827v1-abstract-full" style="display: none;"> This paper reviews the Challenge on Video Saliency Prediction at AIM 2024. The goal of the participants was to develop a method for predicting accurate saliency maps for the provided set of video sequences. Saliency maps are widely exploited in various applications, including video compression, quality assessment, visual perception studies, the advertising industry, etc. For this competition, a previously unused large-scale audio-visual mouse saliency (AViMoS) dataset of 1500 videos with more than 70 observers per video was collected using crowdsourced mouse tracking. The dataset collection methodology has been validated using conventional eye-tracking data and has shown high consistency. Over 30 teams registered in the challenge, and there are 7 teams that submitted the results in the final phase. The final phase solutions were tested and ranked by commonly used quality metrics on a private test subset. The results of this evaluation and the descriptions of the solutions are presented in this report. All data, including the private test subset, is made publicly available on the challenge homepage - https://challenges.videoprocessing.ai/challenges/video-saliency-prediction.html. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14827v1-abstract-full').style.display = 'none'; document.getElementById('2409.14827v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ECCVW 2024</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.4.6; I.2.10 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.14705">arXiv:2409.14705</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.14705">pdf</a>, <a href="https://arxiv.org/format/2409.14705">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Target-Aware Language Modeling via Granular Data Sampling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chang%2C+E">Ernie Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+P">Pin-Jie Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yang Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Changsheng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+D">Daeil Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Rabatin%2C+R">Rastislav Rabatin</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zechun Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yangyang Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Chandra%2C+V">Vikas Chandra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.14705v1-abstract-short" style="display: inline;"> Language model pretraining generally targets a broad range of use cases and incorporates data from diverse sources. However, there are instances where we desire a model that excels in specific areas without markedly compromising performance in other areas. A cost-effective and straightforward approach is sampling with low-dimensional data features, which allows to select large-scale pretraining da&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14705v1-abstract-full').style.display = 'inline'; document.getElementById('2409.14705v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.14705v1-abstract-full" style="display: none;"> Language model pretraining generally targets a broad range of use cases and incorporates data from diverse sources. However, there are instances where we desire a model that excels in specific areas without markedly compromising performance in other areas. A cost-effective and straightforward approach is sampling with low-dimensional data features, which allows to select large-scale pretraining data for domain-specific use cases. In this work, we revisit importance sampling with n-gram features consisting of multi-granular tokens, which strikes a good balance between sentence compression and representation capabilities. We observed the sampled data to have a high correlation with the target downstream task performance while preserving its effectiveness on other tasks. 
This leads to the proposed data sampling paradigm where language models can be pretrained more efficiently on selected documents. On eight benchmarks we demonstrate with $\sim$1% of the data, pretrained models perform on par with the full RefinedWeb data and outperform randomly selected samples for model sizes ranging from 125M to 1.5B. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14705v1-abstract-full').style.display = 'none'; document.getElementById('2409.14705v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to EMNLP 2024 Main Conference, 9 pages, 6 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.14365">arXiv:2409.14365</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.14365">pdf</a>, <a href="https://arxiv.org/format/2409.14365">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> D3RoMa: Disparity Diffusion-based Depth Sensing for Material-Agnostic Robotic Manipulation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wei%2C+S">Songlin Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Geng%2C+H">Haoran Geng</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jiayi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+C">Congyue Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+W">Wenbo Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+C">Chengyang Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+X">Xiaomeng Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Guibas%2C+L">Leonidas Guibas</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">He Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.14365v2-abstract-short" style="display: inline;"> Depth sensing is an important problem for 3D vision-based robotics. Yet, a real-world active stereo or ToF depth camera often produces noisy and incomplete depth which bottlenecks robot performances. In this work, we propose D3RoMa, a learning-based depth estimation framework on stereo image pairs that predicts clean and accurate depth in diverse indoor scenes, even in the most challenging scenari&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14365v2-abstract-full').style.display = 'inline'; document.getElementById('2409.14365v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.14365v2-abstract-full" style="display: none;"> Depth sensing is an important problem for 3D vision-based robotics. Yet, a real-world active stereo or ToF depth camera often produces noisy and incomplete depth which bottlenecks robot performances. 
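The abstract above describes importance sampling with n-gram features to select pretraining documents for a target use case. The sketch below illustrates that general idea under simple assumptions (whitespace tokens, hashed bigram counts, and an additive-smoothing log-likelihood-ratio score); it is not the authors' implementation, and the helper names are hypothetical.

```python
from collections import Counter
from typing import Iterable
import math

def ngram_features(text: str, n: int = 2, buckets: int = 2**16) -> Counter:
    """Hashed n-gram counts over whitespace tokens (a stand-in for multi-granular tokens)."""
    toks = text.split()
    feats = Counter()
    for i in range(len(toks) - n + 1):
        feats[hash(" ".join(toks[i:i + n])) % buckets] += 1
    return feats

def distribution(texts: Iterable[str], n: int = 2) -> Counter:
    total = Counter()
    for t in texts:
        total.update(ngram_features(t, n))
    return total

def importance(doc: str, target: Counter, general: Counter, n: int = 2, alpha: float = 1.0) -> float:
    """Average log-likelihood ratio of the document's n-grams under the target vs. general corpus."""
    t_sum, g_sum = sum(target.values()), sum(general.values())
    vocab = len(set(target) | set(general)) + 1
    score, count = 0.0, 0
    for f, c in ngram_features(doc, n).items():
        p_t = (target[f] + alpha) / (t_sum + alpha * vocab)
        p_g = (general[f] + alpha) / (g_sum + alpha * vocab)
        score += c * math.log(p_t / p_g)
        count += c
    return score / max(count, 1)

# Rank candidate pretraining documents by similarity to a small target corpus.
target_corpus = ["translate the following sentence", "what is the capital of france"]
general_corpus = ["stock prices fell sharply today", "the recipe calls for two eggs"]
candidates = ["what is the capital of spain", "prices rose on monday"]
tgt, gen = distribution(target_corpus), distribution(general_corpus)
print(sorted(candidates, key=lambda d: importance(d, tgt, gen), reverse=True))
```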
arXiv:2409.14365 (https://arxiv.org/abs/2409.14365) [pdf, other]
Subjects: cs.RO (Robotics)
Title: D3RoMa: Disparity Diffusion-based Depth Sensing for Material-Agnostic Robotic Manipulation
Authors: Songlin Wei, Haoran Geng, Jiayi Chen, Congyue Deng, Wenbo Cui, Chengyang Zhao, Xiaomeng Fang, Leonidas Guibas, He Wang
Abstract: Depth sensing is an important problem for 3D vision-based robotics. Yet, real-world active stereo and ToF depth cameras often produce noisy and incomplete depth that bottlenecks robot performance. In this work, we propose D3RoMa, a learning-based depth estimation framework on stereo image pairs that predicts clean and accurate depth in diverse indoor scenes, even in the most challenging scenarios with translucent or specular surfaces where classical depth sensing completely fails. The key to our method is that we unify depth estimation and restoration into an image-to-image translation problem by predicting the disparity map with a denoising diffusion probabilistic model. At inference time, we further incorporate a left-right consistency constraint as classifier guidance for the diffusion process. Our framework combines recently advanced learning-based approaches with geometric constraints from traditional stereo vision. For model training, we create a large scene-level synthetic dataset with diverse transparent and specular objects to compensate for existing tabletop datasets. The trained model can be directly applied to real-world in-the-wild scenes and achieves state-of-the-art performance on multiple public depth estimation benchmarks. Further experiments in real environments show that accurate depth prediction significantly improves robotic manipulation in various scenarios.
Submitted: 24 September, 2024; v1 submitted 22 September, 2024; originally announced September 2024.
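D3RoMa reportedly applies a left-right consistency constraint as classifier guidance during diffusion sampling. The toy PyTorch sketch below shows how such guidance can be wired into a DDIM-style loop: the predicted clean disparity is nudged by the gradient of a photometric consistency loss before the next denoising step. The `model` and `warp` interfaces, the noise schedule, and the guidance scale are hypothetical placeholders, not the paper's code.

```python
import torch

def guided_disparity_sampling(model, left, right, warp, steps, alphas_cumprod, guidance_scale=1.0):
    """Toy DDIM-style sampler with a left-right photometric consistency guidance term.

    Assumptions: `model(x_t, t, left, right)` predicts noise, and `warp(right, disparity)`
    differentiably resamples the right image into the left view. Both are placeholders.
    """
    x = torch.randn_like(left[:, :1])                    # initial noisy disparity map
    for t in reversed(range(steps)):
        a_t = alphas_cumprod[t]
        a_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
        eps = model(x, t, left, right)
        x0 = (x - (1 - a_t).sqrt() * eps) / a_t.sqrt()   # predicted clean disparity

        # Classifier-guidance-style step: push x0 toward left-right consistency.
        x0 = x0.detach().requires_grad_(True)
        loss = (warp(right, x0) - left).abs().mean()
        grad = torch.autograd.grad(loss, x0)[0]
        x0 = (x0 - guidance_scale * grad).detach()

        # Deterministic update to the previous timestep.
        x = a_prev.sqrt() * x0 + (1 - a_prev).sqrt() * eps
    return x
```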
arXiv:2409.11234 (https://arxiv.org/abs/2409.11234) [pdf, other]
Subjects: cs.CV (Computer Vision and Pattern Recognition)
Title: STCMOT: Spatio-Temporal Cohesion Learning for UAV-Based Multiple Object Tracking
Authors: Jianbo Ma, Chuanming Tang, Fei Wu, Can Zhao, Jianlin Zhang, Zhiyong Xu
Abstract: Multiple object tracking (MOT) in Unmanned Aerial Vehicle (UAV) videos is important for diverse applications in computer vision. Current MOT trackers rely on accurate object detection results and precise matching for target re-identification (ReID). These methods focus on optimizing target spatial attributes while overlooking temporal cues in modelling object relationships, especially under challenging tracking conditions such as object deformation and blurring. To address these issues, we propose a novel Spatio-Temporal Cohesion Multiple Object Tracking framework (STCMOT), which utilizes historical embedding features to model the representation of ReID and detection features in sequential order. Concretely, a temporal embedding boosting module is introduced to enhance the discriminability of individual embeddings based on adjacent-frame cooperation, and the trajectory embedding is then propagated by a temporal detection refinement module to mine salient target locations in the temporal field. Extensive experiments on the VisDrone2019 and UAVDT datasets demonstrate that STCMOT sets new state-of-the-art performance in the MOTA and IDF1 metrics. The source code is released at https://github.com/ydhcg-BoBo/STCMOT.
Submitted: 17 September, 2024; originally announced September 2024.
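STCMOT's temporal embedding boosting module leverages adjacent-frame cooperation to make ReID embeddings more discriminative. A common, generic way to express that intuition is an exponential moving average over per-track embeddings, sketched below; the class, momentum value, and normalization are assumptions rather than the paper's actual module.

```python
import numpy as np

class TrackEmbeddingSmoother:
    """Exponential moving average of per-track ReID embeddings across frames.

    A generic illustration of using temporal cues to stabilize appearance features;
    not the specific temporal embedding boosting module described in the paper.
    """
    def __init__(self, momentum: float = 0.9):
        self.momentum = momentum
        self.bank: dict[int, np.ndarray] = {}

    def update(self, track_id: int, embedding: np.ndarray) -> np.ndarray:
        embedding = embedding / (np.linalg.norm(embedding) + 1e-8)
        if track_id in self.bank:
            smoothed = self.momentum * self.bank[track_id] + (1 - self.momentum) * embedding
        else:
            smoothed = embedding
        self.bank[track_id] = smoothed / (np.linalg.norm(smoothed) + 1e-8)
        return self.bank[track_id]

# Example: smooth a track's embedding over three frames.
smoother = TrackEmbeddingSmoother()
for _ in range(3):
    feat = smoother.update(track_id=7, embedding=np.random.rand(128))
print(feat.shape)   # (128,)
```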
arXiv:2409.11169 (https://arxiv.org/abs/2409.11169) [pdf, other]
Subjects: eess.IV (Image and Video Processing); cs.AI (Artificial Intelligence); cs.CV (Computer Vision and Pattern Recognition)
Title: MAISI: Medical AI for Synthetic Imaging
Authors: Pengfei Guo, Can Zhao, Dong Yang, Ziyue Xu, Vishwesh Nath, Yucheng Tang, Benjamin Simon, Mason Belue, Stephanie Harmon, Baris Turkbey, Daguang Xu
Abstract: Medical imaging analysis faces challenges such as data scarcity, high annotation costs, and privacy concerns. This paper introduces Medical AI for Synthetic Imaging (MAISI), an approach that uses a diffusion model to generate synthetic 3D computed tomography (CT) images to address those challenges. MAISI leverages a foundation volume compression network and a latent diffusion model to produce high-resolution CT images (up to a landmark volume dimension of 512 x 512 x 768) with flexible volume dimensions and voxel spacing. By incorporating ControlNet, MAISI can process organ segmentations covering 127 anatomical structures as additional conditions, enabling the generation of accurately annotated synthetic images for various downstream tasks. Our experimental results show that MAISI generates realistic, anatomically accurate images for diverse regions and conditions, demonstrating its potential to mitigate these challenges with synthetic data.
Submitted: 29 October, 2024; v1 submitted 13 September, 2024; originally announced September 2024.
Comments: WACV25 accepted. https://monai.io/research/maisi
arXiv:2409.05493 (https://arxiv.org/abs/2409.05493) [pdf, other]
Subjects: cs.RO (Robotics)
Title: DexDiff: Towards Extrinsic Dexterity Manipulation of Ungraspable Objects in Unrestricted Environments
Authors: Chengzhong Ma, Houxue Yang, Hanbo Zhang, Zeyang Liu, Chao Zhao, Jian Tang, Xuguang Lan, Nanning Zheng
Abstract: Grasping large, flat objects (e.g., a book or a pan) is often regarded as an ungraspable task, posing significant challenges due to the unreachable grasping poses. Previous works leverage extrinsic dexterity, such as walls or table edges, to grasp such objects. However, they are limited to task-specific policies and lack task planning to find pre-grasp conditions, which makes it difficult to adapt to various environments and extrinsic-dexterity constraints. Therefore, we present DexDiff, a robust robotic manipulation method for long-horizon planning with extrinsic dexterity. Specifically, we utilize a vision-language model (VLM) to perceive the environmental state and generate high-level task plans, followed by a goal-conditioned action diffusion (GCAD) model to predict the sequence of low-level actions. This model learns the low-level policy from offline data with the cumulative reward guided by high-level planning as the goal condition, allowing for improved prediction of robot actions. Experimental results demonstrate that our method not only effectively performs ungraspable tasks but also generalizes to previously unseen objects. It outperforms baselines with a 47% higher success rate in simulation and enables efficient deployment and manipulation in real-world scenarios.
Submitted: 9 September, 2024; originally announced September 2024.
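DexDiff couples a VLM planner with a goal-conditioned action diffusion (GCAD) policy. The sketch below only illustrates that two-level control flow; `vlm.plan`, `diffusion_policy.sample_actions`, and the `env` interface are hypothetical placeholders, and the real system's observation and action spaces are not modeled.

```python
def run_episode(vlm, diffusion_policy, env, horizon=200):
    """High-level subgoals from a VLM, low-level actions from a goal-conditioned diffusion policy.

    All interfaces here are hypothetical placeholders used only to show the control flow.
    """
    obs = env.reset()
    subgoals = vlm.plan(obs["rgb"], task="retrieve the flat object from the table")
    for goal in subgoals:
        for _ in range(horizon):
            # The diffusion model denoises a short action sequence conditioned on obs and subgoal.
            actions = diffusion_policy.sample_actions(obs, goal)
            for a in actions:
                obs, done = env.step(a)
                if done:
                    return obs
    return obs
```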
arXiv:2409.03853 (https://arxiv.org/abs/2409.03853) [pdf, other]
Subjects: cs.HC (Human-Computer Interaction)
DOI: 10.1145/3678575
Title: Users' Perspectives on Multimodal Menstrual Tracking Using Consumer Health Devices
Authors: Georgianna Lin, Brenna Li, Helen Li, Chloe Zhao, Khai N Truong, Alex Mariakakis
Abstract: Previous menstrual health literature highlights a variety of signals not included in existing menstrual trackers because they are either difficult to gather or not typically associated with menstrual health. Since it has become increasingly convenient to collect biomarkers through wearables and other consumer-grade devices, our work examines how people incorporate unconventional signals (e.g., blood glucose levels, heart rate) into their understanding of menstrual health. In this paper, we describe a three-month-long study of fifty participants' experiences as they tracked their health using physiological sensors and daily diaries. We analyzed their experiences with both conventional and unconventional menstrual health signals through surveys and interviews conducted throughout the study. We delve into the aspects of menstrual health that participants sought to affirm using unconventional signals, explore how these signals influenced their daily behaviors, and examine how multimodal menstrual tracking expanded their scope of menstrual health. Finally, we provide design recommendations for future multimodal menstrual trackers.
Submitted: 5 September, 2024; originally announced September 2024.
Comments: 25 pages, 4 figures, 2 tables. Accepted by IMWUT/Ubicomp 2024

arXiv:2409.02877 (https://arxiv.org/abs/2409.02877) [pdf, other]
Subjects: cs.AI (Artificial Intelligence); cs.CL (Computation and Language); cs.LG (Machine Learning)
Title: Configurable Foundation Models: Building LLMs from a Modular Perspective
Authors: Chaojun Xiao, Zhengyan Zhang, Chenyang Song, Dazhi Jiang, Feng Yao, Xu Han, Xiaozhi Wang, Shuo Wang, Yufei Huang, Guanyu Lin, Yingfa Chen, Weilin Zhao, Yuge Tu, Zexuan Zhong, Ao Zhang, Chenglei Si, Khai Hao Moo, Chenyang Zhao, Huimin Chen, Yankai Lin, Zhiyuan Liu, Jingbo Shang, Maosong Sun
Abstract: Recent advancements in LLMs have revealed challenges tied to computational efficiency and continual scalability: because these models require huge numbers of parameters, deploying and evolving them on devices with limited computation resources, or in scenarios that require a variety of abilities, becomes increasingly cumbersome. Inspired by the modularity of the human brain, there is a growing tendency to decompose LLMs into numerous functional modules, allowing inference with only part of the modules and dynamic assembly of modules to tackle complex tasks, as in mixture-of-experts. To highlight the inherent efficiency and composability of the modular approach, we coin the term "brick" for each functional module and designate the modularized structure as configurable foundation models. In this paper, we offer a comprehensive overview and investigation of the construction, utilization, and limitations of configurable foundation models. We first formalize modules into emergent bricks (functional neuron partitions that emerge during the pre-training phase) and customized bricks (bricks constructed via additional post-training to improve the capabilities and knowledge of LLMs). Based on diverse functional bricks, we further present four brick-oriented operations: retrieval and routing, merging, updating, and growing. These operations allow for dynamic configuration of LLMs based on instructions to handle complex tasks. To verify our perspective, we conduct an empirical analysis on widely used LLMs. We find that the FFN layers follow modular patterns with functional specialization of neurons and functional neuron partitions. Finally, we highlight several open issues and directions for future research. Overall, this paper aims to offer a fresh modular perspective on existing LLM research and to inspire the future creation of more efficient and scalable foundation models.
Submitted: 4 September, 2024; originally announced September 2024.
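Among the four brick-oriented operations, retrieval and routing selects a sparse subset of bricks per input. The PyTorch sketch below shows a generic top-k router over small MLP bricks in that spirit; the brick granularity, learned keys, and scoring rule are illustrative assumptions, not the paper's formulation.

```python
import torch
import torch.nn as nn

class BrickRouter(nn.Module):
    """Select and combine a sparse subset of functional modules ("bricks") per input.

    A generic top-k routing sketch; the keys and scoring are illustrative assumptions.
    """
    def __init__(self, bricks: nn.ModuleList, dim: int, top_k: int = 2):
        super().__init__()
        self.bricks = bricks
        self.keys = nn.Parameter(torch.randn(len(bricks), dim))
        self.top_k = top_k

    def forward(self, h: torch.Tensor) -> torch.Tensor:   # h: (batch, dim)
        scores = h @ self.keys.t()                         # (batch, n_bricks)
        weights, idx = scores.topk(self.top_k, dim=-1)
        weights = weights.softmax(dim=-1)
        out = torch.zeros_like(h)
        for b in range(h.size(0)):
            for w, i in zip(weights[b], idx[b]):
                out[b] += w * self.bricks[int(i)](h[b:b + 1]).squeeze(0)
        return out

# Example: route a 16-dim hidden state through 2 of 4 small MLP bricks.
bricks = nn.ModuleList(nn.Sequential(nn.Linear(16, 16), nn.GELU(), nn.Linear(16, 16)) for _ in range(4))
router = BrickRouter(bricks, dim=16, top_k=2)
print(router(torch.randn(3, 16)).shape)   # torch.Size([3, 16])
```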
arXiv:2409.02132 (https://arxiv.org/abs/2409.02132) [pdf, other]
Subjects: quant-ph (Quantum Physics); cs.LG (Machine Learning)
Title: Recognition of Schrodinger cat state based on CNN
Authors: Tao Zhang, Chaoying Zhao
Abstract: We applied convolutional neural networks to the classification of cat states and coherent states. Initially, we generated datasets of Schrodinger cat states and coherent states from nonlinear processes and preprocessed these datasets. Subsequently, we constructed both LeNet and ResNet network architectures, adjusting parameters such as convolution kernels and strides to optimal values. We then trained both LeNet and ResNet on the training sets. The loss function values indicated that ResNet performs better in classifying cat states and coherent states. Finally, we evaluated the trained models on the test sets, achieving an accuracy of 97.5% for LeNet and 100% for ResNet. We also evaluated cat states and coherent states with different α, demonstrating a certain degree of generalization capability. The results show that LeNet may mistakenly recognize coherent states as cat states without coherent features, while ResNet provides a feasible solution to the problem of traditional neural networks mistakenly recognizing cat states and coherent states.
Submitted: 2 September, 2024; originally announced September 2024.
Comments: 6 pages, 5 figures
arXiv:2409.01557 (https://arxiv.org/abs/2409.01557) [pdf, other]
Subjects: cs.CV (Computer Vision and Pattern Recognition)
Title: TASL-Net: Tri-Attention Selective Learning Network for Intelligent Diagnosis of Bimodal Ultrasound Video
Authors: Chengqian Zhao, Zhao Yao, Zhaoyu Hu, Yuanxin Xie, Yafang Zhang, Yuanyuan Wang, Shuo Li, Jianhua Zhou, Jianqiao Zhou, Yin Wang, Jinhua Yu
Abstract: In the intelligent diagnosis of bimodal (gray-scale and contrast-enhanced) ultrasound videos, medical domain knowledge, such as the way sonographers browse videos, the particular areas they emphasize, and the features they pay special attention to, plays a decisive role in facilitating precise diagnosis. Embedding medical knowledge into the deep learning network can not only enhance performance but also boost the clinical confidence and reliability of the network. However, it is an intractable challenge to automatically focus on these person- and disease-specific features in videos and to enable networks to encode bimodal information comprehensively and efficiently. This paper proposes a novel Tri-Attention Selective Learning Network (TASL-Net) to tackle this challenge and automatically embed three types of diagnostic attention of sonographers into a mutual transformer framework for intelligent diagnosis of bimodal ultrasound videos. First, a time-intensity-curve-based video selector is designed to mimic the temporal attention of sonographers, removing a large amount of redundant information while improving the computational efficiency of TASL-Net. Then, to introduce the sonographers' spatial attention for contrast-enhanced video analysis, we propose an earliest-enhanced position detector based on structural similarity variation, which makes TASL-Net focus on the differences in perfusion variation inside and outside the lesion. Finally, by proposing a mutual encoding strategy that combines convolution and transformer, TASL-Net possesses bimodal attention to structural features in gray-scale videos and to perfusion variations in contrast-enhanced videos. These modules work collaboratively and contribute to superior performance. We conduct a detailed experimental validation of TASL-Net's performance on three datasets, including lung, breast, and liver.
Submitted: 2 September, 2024; originally announced September 2024.
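TASL-Net's video selector uses a time-intensity curve (TIC) to mimic the sonographer's temporal attention and drop redundant frames. The NumPy sketch below shows one generic way to pick frames around the fastest contrast wash-in from a TIC; the ROI handling, window size, and selection rule are assumptions, not the paper's detector.

```python
import numpy as np

def select_frames_by_tic(video: np.ndarray, roi: tuple, k: int = 16) -> np.ndarray:
    """Pick k frames around the contrast wash-in using a time-intensity curve (TIC).

    `video` is (T, H, W) grayscale; `roi` is (y0, y1, x0, x1). A generic illustration
    of TIC-guided frame selection, not TASL-Net's exact video selector.
    """
    y0, y1, x0, x1 = roi
    tic = video[:, y0:y1, x0:x1].mean(axis=(1, 2))        # mean intensity per frame
    wash_in = np.gradient(tic)                            # enhancement rate over time
    center = int(np.argmax(wash_in))                      # frame with fastest enhancement
    start = max(0, min(center - k // 2, len(tic) - k))
    return video[start:start + k]

# Example on a synthetic clip: 100 frames of 128x128 noise with a rising ROI intensity.
clip = np.random.rand(100, 128, 128) * 0.1
clip[40:, 32:96, 32:96] += np.linspace(0, 1, 60)[:, None, None]
print(select_frames_by_tic(clip, roi=(32, 96, 32, 96)).shape)   # (16, 128, 128)
```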
arXiv:2409.00060 (https://arxiv.org/abs/2409.00060) [pdf, other]
Subjects: cs.CL (Computation and Language)
Title: Understanding Literary Texts by LLMs: A Case Study of Ancient Chinese Poetry
Authors: Cheng Zhao, Bin Wang, Zhen Wang
Abstract: The birth and rapid development of large language models (LLMs) have caused quite a stir in the field of literature. Once considered unattainable, AI's role in literary creation is increasingly becoming a reality. In genres such as poetry, jokes, and short stories, numerous AI tools have emerged, offering refreshing new perspectives. However, it is difficult to further improve the quality of these works. This is primarily because understanding and appreciating a good literary work involves a considerable threshold, such as knowledge of literary theory, aesthetic sensibility, and interdisciplinary knowledge, so authoritative data in this area is quite lacking. Additionally, evaluating literary works is often complex and hard to fully quantify, which directly hinders the further development of AI creation. To address this issue, this paper attempts to explore the mysteries of literary texts from the perspective of LLMs, using ancient Chinese poetry as an example for experimentation. First, we collected a variety of ancient poems from different sources and had experts annotate a small portion of them. Then, we designed a range of comprehension metrics based on LLMs to evaluate all of these poems. Finally, we analyzed the correlations and differences between the various poem collections to identify literary patterns. Through our experiments, we observed a series of enlightening phenomena that provide technical support for the future development of high-level literary creation based on LLMs.
Submitted: 11 September, 2024; v1 submitted 22 August, 2024; originally announced September 2024.
arXiv:2408.16365 (https://arxiv.org/abs/2408.16365) [pdf, ps, other]
Subjects: cs.IT (Information Theory)
Title: Protograph-Based Batched Network Codes
Authors: Mingyang Zhu, Ming Jiang, Chunming Zhao
Abstract: Batched network codes (BNCs) are a low-complexity solution for communication through networks with packet loss. Although their belief propagation (BP) performance is proved to approach capacity in the asymptotic regime, there is no evidence indicating that their BP performance is as good as expected in the finite-length regime. In this paper, we propose a protograph-based construction for BNCs, referred to as protograph-based BNCs (P-BNCs), which significantly differs from existing BNCs in three aspects: 1) Unlike traditional constructions, where the degree of variable nodes is random, P-BNCs have a highly structured Tanner graph with specified degree distributions for both variable nodes and check nodes. 2) Traditional BNCs use a fixed degree distribution to generate all batches, making their performance highly sensitive to channel conditions, but P-BNCs achieve good performance under varying channel conditions thanks to their rate-compatible structures. 3) The construction of P-BNCs takes into account joint BP decoding with a sparse precode, whereas traditional constructions typically do not consider a precode, or assume the presence of a precode that can recover a certain fraction of erasures. Thanks to these three improvements, P-BNCs not only have higher achievable rates under varying channel conditions but, more importantly, their finite-length BP performance is significantly improved.
Submitted: 29 August, 2024; originally announced August 2024.
Comments: Submitted to IEEE for possible publication
