CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 583 results for author: <span class="mathjax">Zheng, X</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Zheng%2C+X">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Zheng, X"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zheng%2C+X&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zheng, X"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14072">arXiv:2411.14072</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.14072">pdf</a>, <a href="https://arxiv.org/format/2411.14072">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> </div> </div> <p class="title is-5 mathjax"> The Master-Slave Encoder Model for Improving Patent Text Summarization: A New Approach to Combining Specifications and Claims </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+S">Shu Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Z">Zhengda Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Yi%2C+H">Haohan Yi</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xuhui Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wan%2C+H">Hao Wan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14072v1-abstract-short" style="display: inline;"> In order to solve the problem of insufficient generation quality caused by traditional patent text abstract generation models only originating from patent specifications, the problem of new terminology OOV caused by rapid patent updates, and the problem of information redundancy caused by insufficient consideration of the high professionalism, accuracy, and uniqueness of patent texts, we proposes&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14072v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14072v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14072v1-abstract-full" style="display: none;"> In order to solve the problem of insufficient generation quality caused by traditional patent text abstract generation models only originating from patent specifications, the problem of new terminology OOV caused by rapid patent updates, and the problem of information redundancy caused by insufficient consideration of the high professionalism, accuracy, and uniqueness of patent texts, we proposes a patent text abstract generation model (MSEA) based on a master-slave encoder architecture; Firstly, the MSEA model designs a master-slave encoder, which combines the instructions in the patent text with the claims as input, and fully explores the characteristics and details between the two through the master-slave encoder; Then, the model enhances the consideration of new technical terms in the input sequence based on the pointer network, and further enhances the correlation with the input text by re weighing the &#34;remembered&#34; and &#34;for-gotten&#34; parts of the input sequence from the encoder; Finally, an enhanced repetition suppression mechanism for patent text was introduced to ensure accurate and non redundant abstracts generated. On a publicly available patent text dataset, compared to the state-of-the-art model, Improved Multi-Head Attention Mechanism (IMHAM), the MSEA model achieves an improvement of 0.006, 0.005, and 0.005 in Rouge-1, Rouge-2, and Rouge-L scores, respectively. MSEA leverages the characteristics of patent texts to effectively enhance the quality of patent text generation, demonstrating its advancement and effectiveness in the experiments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14072v1-abstract-full').style.display = 'none'; document.getElementById('2411.14072v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25pages, 1 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13093">arXiv:2411.13093</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13093">pdf</a>, <a href="https://arxiv.org/format/2411.13093">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Video-RAG: Visually-aligned Retrieval-Augmented Long Video Comprehension </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Luo%2C+Y">Yongdong Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiawu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+X">Xiao Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Guilin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Haojia Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+J">Jinfa Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+J">Jiayi Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Chao%2C+F">Fei Chao</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+J">Jiebo Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+R">Rongrong Ji</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13093v1-abstract-short" style="display: inline;"> Existing large video-language models (LVLMs) struggle to comprehend long videos correctly due to limited context. To address this problem, fine-tuning long-context LVLMs and employing GPT-based agents have emerged as promising solutions. However, fine-tuning LVLMs would require extensive high-quality data and substantial GPU resources, while GPT-based agents would rely on proprietary models (e.g.,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13093v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13093v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13093v1-abstract-full" style="display: none;"> Existing large video-language models (LVLMs) struggle to comprehend long videos correctly due to limited context. To address this problem, fine-tuning long-context LVLMs and employing GPT-based agents have emerged as promising solutions. However, fine-tuning LVLMs would require extensive high-quality data and substantial GPU resources, while GPT-based agents would rely on proprietary models (e.g., GPT-4o). In this paper, we propose Video Retrieval-Augmented Generation (Video-RAG), a training-free and cost-effective pipeline that employs visually-aligned auxiliary texts to help facilitate cross-modality alignment while providing additional information beyond the visual content. Specifically, we leverage open-source external tools to extract visually-aligned information from pure video data (e.g., audio, optical character, and object detection), and incorporate the extracted information into an existing LVLM as auxiliary texts, alongside video frames and queries, in a plug-and-play manner. Our Video-RAG offers several key advantages: (i) lightweight with low computing overhead due to single-turn retrieval; (ii) easy implementation and compatibility with any LVLM; and (iii) significant, consistent performance gains across long video understanding benchmarks, including Video-MME, MLVU, and LongVideoBench. Notably, our model demonstrates superior performance over proprietary models like Gemini-1.5-Pro and GPT-4o when utilized with a 72B model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13093v1-abstract-full').style.display = 'none'; document.getElementById('2411.13093v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11620">arXiv:2411.11620</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11620">pdf</a>, <a href="https://arxiv.org/format/2411.11620">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> ST-Tree with Interpretability for Multivariate Time Series Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Du%2C+M">Mingsen Du</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+Y">Yanxuan Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yingxia Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiangwei Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+S">Shoushui Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+C">Cun Ji</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11620v1-abstract-short" style="display: inline;"> Multivariate time series classification is of great importance in practical applications and is a challenging task. However, deep neural network models such as Transformers exhibit high accuracy in multivariate time series classification but lack interpretability and fail to provide insights into the decision-making process. On the other hand, traditional approaches based on decision tree classifi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11620v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11620v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11620v1-abstract-full" style="display: none;"> Multivariate time series classification is of great importance in practical applications and is a challenging task. However, deep neural network models such as Transformers exhibit high accuracy in multivariate time series classification but lack interpretability and fail to provide insights into the decision-making process. On the other hand, traditional approaches based on decision tree classifiers offer clear decision processes but relatively lower accuracy. Swin Transformer (ST) addresses these issues by leveraging self-attention mechanisms to capture both fine-grained local patterns and global patterns. It can also model multi-scale feature representation learning, thereby providing a more comprehensive representation of time series features. To tackle the aforementioned challenges, we propose ST-Tree with interpretability for multivariate time series classification. Specifically, the ST-Tree model combines ST as the backbone network with an additional neural tree model. This integration allows us to fully leverage the advantages of ST in learning time series context while providing interpretable decision processes through the neural tree. This enables researchers to gain clear insights into the model&#39;s decision-making process and extract meaningful interpretations. Through experimental evaluations on 10 UEA datasets, we demonstrate that the ST-Tree model improves accuracy in multivariate time series classification tasks and provides interpretability through visualizing the decision-making process across different datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11620v1-abstract-full').style.display = 'none'; document.getElementById('2411.11620v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted on May 15, 2024, major revisions on Aug 31, 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10575">arXiv:2411.10575</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.10575">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Digital Libraries">cs.DL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Tenure and Research Trajectories </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tripodi%2C+G">Giorgio Tripodi</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiang Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Qian%2C+Y">Yifan Qian</a>, <a href="/search/cs?searchtype=author&amp;query=Murray%2C+D">Dakota Murray</a>, <a href="/search/cs?searchtype=author&amp;query=Jones%2C+B+F">Benjamin F. Jones</a>, <a href="/search/cs?searchtype=author&amp;query=Ni%2C+C">Chaoqun Ni</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+D">Dashun Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10575v1-abstract-short" style="display: inline;"> Tenure is a cornerstone of the US academic system, yet its relationship to faculty research trajectories remains poorly understood. Conceptually, tenure systems may act as a selection mechanism, screening in high-output researchers; a dynamic incentive mechanism, encouraging high output prior to tenure but low output after tenure; and a creative search mechanism, encouraging tenured individuals to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10575v1-abstract-full').style.display = 'inline'; document.getElementById('2411.10575v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10575v1-abstract-full" style="display: none;"> Tenure is a cornerstone of the US academic system, yet its relationship to faculty research trajectories remains poorly understood. Conceptually, tenure systems may act as a selection mechanism, screening in high-output researchers; a dynamic incentive mechanism, encouraging high output prior to tenure but low output after tenure; and a creative search mechanism, encouraging tenured individuals to undertake high-risk work. Here, we integrate data from seven different sources to trace US tenure-line faculty and their research outputs at an unprecedented scale and scope, covering over 12,000 researchers across 15 disciplines. Our analysis reveals that faculty publication rates typically increase sharply during the tenure track and peak just before obtaining tenure. Post-tenure trends, however, vary across disciplines: in lab-based fields, such as biology and chemistry, research output typically remains high post-tenure, whereas in non-lab-based fields, such as mathematics and sociology, research output typically declines substantially post-tenure. Turning to creative search, faculty increasingly produce novel, high-risk research after securing tenure. However, this shift toward novelty and risk-taking comes with a decline in impact, with post-tenure research yielding fewer highly cited papers. Comparing outcomes across common career ages but different tenure years or comparing research trajectories in tenure-based and non-tenure-based research settings underscores that breaks in the research trajectories are sharply tied to the individual&#39;s tenure year. Overall, these findings provide a new empirical basis for understanding the tenure system, individual research trajectories, and the shape of scientific output. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10575v1-abstract-full').style.display = 'none'; document.getElementById('2411.10575v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06881">arXiv:2411.06881</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.06881">pdf</a>, <a href="https://arxiv.org/format/2411.06881">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> WassFFed: Wasserstein Fair Federated Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhongxuan Han</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Li Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chaochao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaolin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+F">Fei Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yuyuan Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+J">Jianwei Yin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06881v1-abstract-short" style="display: inline;"> Federated Learning (FL) employs a training approach to address scenarios where users&#39; data cannot be shared across clients. Achieving fairness in FL is imperative since training data in FL is inherently geographically distributed among diverse user groups. Existing research on fairness predominantly assumes access to the entire training data, making direct transfer to FL challenging. However, the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06881v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06881v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06881v1-abstract-full" style="display: none;"> Federated Learning (FL) employs a training approach to address scenarios where users&#39; data cannot be shared across clients. Achieving fairness in FL is imperative since training data in FL is inherently geographically distributed among diverse user groups. Existing research on fairness predominantly assumes access to the entire training data, making direct transfer to FL challenging. However, the limited existing research on fairness in FL does not effectively address two key challenges, i.e., (CH1) Current methods fail to deal with the inconsistency between fair optimization results obtained with surrogate functions and fair classification results. (CH2) Directly aggregating local fair models does not always yield a globally fair model due to non Identical and Independent data Distributions (non-IID) among clients. To address these challenges, we propose a Wasserstein Fair Federated Learning framework, namely WassFFed. To tackle CH1, we ensure that the outputs of local models, rather than the loss calculated with surrogate functions or classification results with a threshold, remain independent of various user groups. To resolve CH2, we employ a Wasserstein barycenter calculation of all local models&#39; outputs for each user group, bringing local model outputs closer to the global output distribution to ensure consistency between the global model and local models. We conduct extensive experiments on three real-world datasets, demonstrating that WassFFed outperforms existing approaches in striking a balance between accuracy and fairness. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06881v1-abstract-full').style.display = 'none'; document.getElementById('2411.06881v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to TKDE</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03752">arXiv:2411.03752</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03752">pdf</a>, <a href="https://arxiv.org/format/2411.03752">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Deferred Poisoning: Making the Model More Vulnerable via Hessian Singularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=He%2C+Y">Yuhao He</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+J">Jinyu Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xianwei Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+L">Li Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yuanman Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L+Y">Leo Yu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+J">Jiantao Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03752v1-abstract-short" style="display: inline;"> Recent studies have shown that deep learning models are very vulnerable to poisoning attacks. Many defense methods have been proposed to address this issue. However, traditional poisoning attacks are not as threatening as commonly believed. This is because they often cause differences in how the model performs on the training set compared to the validation set. Such inconsistency can alert defende&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03752v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03752v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03752v1-abstract-full" style="display: none;"> Recent studies have shown that deep learning models are very vulnerable to poisoning attacks. Many defense methods have been proposed to address this issue. However, traditional poisoning attacks are not as threatening as commonly believed. This is because they often cause differences in how the model performs on the training set compared to the validation set. Such inconsistency can alert defenders that their data has been poisoned, allowing them to take the necessary defensive actions. In this paper, we introduce a more threatening type of poisoning attack called the Deferred Poisoning Attack. This new attack allows the model to function normally during the training and validation phases but makes it very sensitive to evasion attacks or even natural noise. We achieve this by ensuring the poisoned model&#39;s loss function has a similar value as a normally trained model at each input sample but with a large local curvature. A similar model loss ensures that there is no obvious inconsistency between the training and validation accuracy, demonstrating high stealthiness. On the other hand, the large curvature implies that a small perturbation may cause a significant increase in model loss, leading to substantial performance degradation, which reflects a worse robustness. We fulfill this purpose by making the model have singular Hessian information at the optimal point via our proposed Singularization Regularization term. We have conducted both theoretical and empirical analyses of the proposed method and validated its effectiveness through experiments on image classification tasks. Furthermore, we have confirmed the hazards of this form of poisoning attack under more general scenarios using natural noise, offering a new perspective for research in the field of security. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03752v1-abstract-full').style.display = 'none'; document.getElementById('2411.03752v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02937">arXiv:2411.02937</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.02937">pdf</a>, <a href="https://arxiv.org/format/2411.02937">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Benchmarking Multimodal Retrieval Augmented Generation with Dynamic VQA Dataset and Self-adaptive Planning Agent </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yangning Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yinghui Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xinyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Y">Yong Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zhen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xinran Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+H">Hai-Tao Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+P">Pengjun Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+P+S">Philip S. Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+F">Fei Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+J">Jingren Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02937v2-abstract-short" style="display: inline;"> Multimodal Retrieval Augmented Generation (mRAG) plays an important role in mitigating the &#34;hallucination&#34; issue inherent in multimodal large language models (MLLMs). Although promising, existing heuristic mRAGs typically predefined fixed retrieval processes, which causes two issues: (1) Non-adaptive Retrieval Queries. (2) Overloaded Retrieval Queries. However, these flaws cannot be adequately ref&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02937v2-abstract-full').style.display = 'inline'; document.getElementById('2411.02937v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02937v2-abstract-full" style="display: none;"> Multimodal Retrieval Augmented Generation (mRAG) plays an important role in mitigating the &#34;hallucination&#34; issue inherent in multimodal large language models (MLLMs). Although promising, existing heuristic mRAGs typically predefined fixed retrieval processes, which causes two issues: (1) Non-adaptive Retrieval Queries. (2) Overloaded Retrieval Queries. However, these flaws cannot be adequately reflected by current knowledge-seeking visual question answering (VQA) datasets, since the most required knowledge can be readily obtained with a standard two-step retrieval. To bridge the dataset gap, we first construct Dyn-VQA dataset, consisting of three types of &#34;dynamic&#34; questions, which require complex knowledge retrieval strategies variable in query, tool, and time: (1) Questions with rapidly changing answers. (2) Questions requiring multi-modal knowledge. (3) Multi-hop questions. Experiments on Dyn-VQA reveal that existing heuristic mRAGs struggle to provide sufficient and precisely relevant knowledge for dynamic questions due to their rigid retrieval processes. Hence, we further propose the first self-adaptive planning agent for multimodal retrieval, OmniSearch. The underlying idea is to emulate the human behavior in question solution which dynamically decomposes complex multimodal questions into sub-question chains with retrieval action. Extensive experiments prove the effectiveness of our OmniSearch, also provide direction for advancing mRAG. The code and dataset will be open-sourced at https://github.com/Alibaba-NLP/OmniSearch. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02937v2-abstract-full').style.display = 'none'; document.getElementById('2411.02937v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.00388">arXiv:2411.00388</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.00388">pdf</a>, <a href="https://arxiv.org/format/2411.00388">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Towards Data Valuation via Asymmetric Data Shapley </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xi Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+X">Xiangyu Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+R">Ruoxi Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+Y">Yong Tan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.00388v2-abstract-short" style="display: inline;"> As data emerges as a vital driver of technological and economic advancements, a key challenge is accurately quantifying its value in algorithmic decision-making. The Shapley value, a well-established concept from cooperative game theory, has been widely adopted to assess the contribution of individual data sources in supervised machine learning. However, its symmetry axiom assumes all players in t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00388v2-abstract-full').style.display = 'inline'; document.getElementById('2411.00388v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.00388v2-abstract-full" style="display: none;"> As data emerges as a vital driver of technological and economic advancements, a key challenge is accurately quantifying its value in algorithmic decision-making. The Shapley value, a well-established concept from cooperative game theory, has been widely adopted to assess the contribution of individual data sources in supervised machine learning. However, its symmetry axiom assumes all players in the cooperative game are homogeneous, which overlooks the complex structures and dependencies present in real-world datasets. To address this limitation, we extend the traditional data Shapley framework to asymmetric data Shapley, making it flexible enough to incorporate inherent structures within the datasets for structure-aware data valuation. We also introduce an efficient $k$-nearest neighbor-based algorithm for its exact computation. We demonstrate the practical applicability of our framework across various machine learning tasks and data market contexts. The code is available at: https://github.com/xzheng01/Asymmetric-Data-Shapley. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00388v2-abstract-full').style.display = 'none'; document.getElementById('2411.00388v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20971">arXiv:2410.20971</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.20971">pdf</a>, <a href="https://arxiv.org/format/2410.20971">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> BlueSuffix: Reinforced Blue Teaming for Vision-Language Models Against Jailbreak Attacks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yunhan Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiang Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+L">Lin Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yige Li</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+X">Xingjun Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Y">Yu-Gang Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20971v1-abstract-short" style="display: inline;"> Despite their superb multimodal capabilities, Vision-Language Models (VLMs) have been shown to be vulnerable to jailbreak attacks, which are inference-time attacks that induce the model to output harmful responses with tricky prompts. It is thus essential to defend VLMs against potential jailbreaks for their trustworthy deployment in real-world applications. In this work, we focus on black-box def&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20971v1-abstract-full').style.display = 'inline'; document.getElementById('2410.20971v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20971v1-abstract-full" style="display: none;"> Despite their superb multimodal capabilities, Vision-Language Models (VLMs) have been shown to be vulnerable to jailbreak attacks, which are inference-time attacks that induce the model to output harmful responses with tricky prompts. It is thus essential to defend VLMs against potential jailbreaks for their trustworthy deployment in real-world applications. In this work, we focus on black-box defense for VLMs against jailbreak attacks. Existing black-box defense methods are either unimodal or bimodal. Unimodal methods enhance either the vision or language module of the VLM, while bimodal methods robustify the model through text-image representation realignment. However, these methods suffer from two limitations: 1) they fail to fully exploit the cross-modal information, or 2) they degrade the model performance on benign inputs. To address these limitations, we propose a novel blue-team method BlueSuffix that defends the black-box target VLM against jailbreak attacks without compromising its performance. BlueSuffix includes three key components: 1) a visual purifier against jailbreak images, 2) a textual purifier against jailbreak texts, and 3) a blue-team suffix generator fine-tuned via reinforcement learning for enhancing cross-modal robustness. We empirically show on three VLMs (LLaVA, MiniGPT-4, and Gemini) and two safety benchmarks (MM-SafetyBench and RedTeam-2K) that BlueSuffix outperforms the baseline defenses by a significant margin. Our BlueSuffix opens up a promising direction for defending VLMs against jailbreak attacks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20971v1-abstract-full').style.display = 'none'; document.getElementById('2410.20971v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.20374">arXiv:2410.20374</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.20374">pdf</a>, <a href="https://arxiv.org/format/2410.20374">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> A CT-guided Control Framework of a Robotic Flexible Endoscope for the Diagnosis of the Maxillary Sinusitis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+P">Puchen Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Huayu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+X">Xin Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaoyin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xuchen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Au%2C+K+W+S">Kwok Wai Samuel Au</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.20374v1-abstract-short" style="display: inline;"> Flexible endoscopes are commonly adopted in narrow and confined anatomical cavities due to their higher reachability and dexterity. However, prolonged and unintuitive manipulation of these endoscopes leads to an increased workload on surgeons and risks of collision. To address these challenges, this paper proposes a CT-guided control framework for the diagnosis of maxillary sinusitis by using a ro&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20374v1-abstract-full').style.display = 'inline'; document.getElementById('2410.20374v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.20374v1-abstract-full" style="display: none;"> Flexible endoscopes are commonly adopted in narrow and confined anatomical cavities due to their higher reachability and dexterity. However, prolonged and unintuitive manipulation of these endoscopes leads to an increased workload on surgeons and risks of collision. To address these challenges, this paper proposes a CT-guided control framework for the diagnosis of maxillary sinusitis by using a robotic flexible endoscope. In the CT-guided control framework, a feasible path to the target position in the maxillary sinus cavity for the robotic flexible endoscope is designed. Besides, an optimal control scheme is proposed to autonomously control the robotic flexible endoscope to follow the feasible path. This greatly improves the efficiency and reduces the workload for surgeons. Several experiments were conducted based on a widely utilized sinus phantom, and the results showed that the robotic flexible endoscope can accurately and autonomously follow the feasible path and reach the target position in the maxillary sinus cavity. The results also verified the feasibility of the CT-guided control framework, which contributes an effective approach to early diagnosis of sinusitis in the future. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.20374v1-abstract-full').style.display = 'none'; document.getElementById('2410.20374v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.19483">arXiv:2410.19483</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.19483">pdf</a>, <a href="https://arxiv.org/format/2410.19483">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Content-Aware Radiance Fields: Aligning Model Complexity with Scene Intricacy Through Learned Bitwidth Quantization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Weihang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X+X">Xue Xian Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+J">Jingyi Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Lou%2C+X">Xin Lou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.19483v1-abstract-short" style="display: inline;"> The recent popular radiance field models, exemplified by Neural Radiance Fields (NeRF), Instant-NGP and 3D Gaussian Splatting, are designed to represent 3D content by that training models for each individual scene. This unique characteristic of scene representation and per-scene training distinguishes radiance field models from other neural models, because complex scenes necessitate models with hi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19483v1-abstract-full').style.display = 'inline'; document.getElementById('2410.19483v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.19483v1-abstract-full" style="display: none;"> The recent popular radiance field models, exemplified by Neural Radiance Fields (NeRF), Instant-NGP and 3D Gaussian Splatting, are designed to represent 3D content by that training models for each individual scene. This unique characteristic of scene representation and per-scene training distinguishes radiance field models from other neural models, because complex scenes necessitate models with higher representational capacity and vice versa. In this paper, we propose content-aware radiance fields, aligning the model complexity with the scene intricacies through Adversarial Content-Aware Quantization (A-CAQ). Specifically, we make the bitwidth of parameters differentiable and trainable, tailored to the unique characteristics of specific scenes and requirements. The proposed framework has been assessed on Instant-NGP, a well-known NeRF variant and evaluated using various datasets. Experimental results demonstrate a notable reduction in computational complexity, while preserving the requisite reconstruction and rendering quality, making it beneficial for practical deployment of radiance fields models. Codes are available at https://github.com/WeihangLiu2024/Content_Aware_NeRF. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.19483v1-abstract-full').style.display = 'none'; document.getElementById('2410.19483v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted by ECCV2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.18995">arXiv:2410.18995</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.18995">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Enhancing Management of Large-Scale Optical Networks through RFID Technology Integration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaoying Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Xuan%2C+X">Xingqi Xuan</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+S">Shilie Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Hui%2C+X">Xiaonan Hui</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xianmin Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.18995v1-abstract-short" style="display: inline;"> Managing large-scale optical distribution networks is a daunting task. This paper introduces a novel solution using radio frequency identification (RFID) technology to transform the procedure we monitor and manage the complex optical network dumb resources (ONDR). By implementing and deploying removable RFID tag pairing based on the serial peripheral interface (SPI) communication protocol, the sys&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18995v1-abstract-full').style.display = 'inline'; document.getElementById('2410.18995v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.18995v1-abstract-full" style="display: none;"> Managing large-scale optical distribution networks is a daunting task. This paper introduces a novel solution using radio frequency identification (RFID) technology to transform the procedure we monitor and manage the complex optical network dumb resources (ONDR). By implementing and deploying removable RFID tag pairing based on the serial peripheral interface (SPI) communication protocol, the system identifies 30 pairs of tags within one second, even at densities of up to 5.1 tagged components per square inch of patch panel surface area. The integration of light-emitting diode (LED) navigation aids in indicating correctly matched interfaces, effectively addressing the complexities associated with large-scale fiber matching. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18995v1-abstract-full').style.display = 'none'; document.getElementById('2410.18995v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">2 pages, 4 figures, conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.18408">arXiv:2410.18408</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.18408">pdf</a>, <a href="https://arxiv.org/format/2410.18408">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Scale Propagation Network for Generalizable Depth Completion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haotian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+M">Meng Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xinhu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Hua%2C+G">Gang Hua</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.18408v1-abstract-short" style="display: inline;"> Depth completion, inferring dense depth maps from sparse measurements, is crucial for robust 3D perception. Although deep learning based methods have made tremendous progress in this problem, these models cannot generalize well across different scenes that are unobserved in training, posing a fundamental limitation that yet to be overcome. A careful analysis of existing deep neural network archite&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18408v1-abstract-full').style.display = 'inline'; document.getElementById('2410.18408v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.18408v1-abstract-full" style="display: none;"> Depth completion, inferring dense depth maps from sparse measurements, is crucial for robust 3D perception. Although deep learning based methods have made tremendous progress in this problem, these models cannot generalize well across different scenes that are unobserved in training, posing a fundamental limitation that yet to be overcome. A careful analysis of existing deep neural network architectures for depth completion, which are largely borrowing from successful backbones for image analysis tasks, reveals that a key design bottleneck actually resides in the conventional normalization layers. These normalization layers are designed, on one hand, to make training more stable, on the other hand, to build more visual invariance across scene scales. However, in depth completion, the scale is actually what we want to robustly estimate in order to better generalize to unseen scenes. To mitigate, we propose a novel scale propagation normalization (SP-Norm) method to propagate scales from input to output, and simultaneously preserve the normalization operator for easy convergence. More specifically, we rescale the input using learned features of a single-layer perceptron from the normalized input, rather than directly normalizing the input as conventional normalization layers. We then develop a new network architecture based on SP-Norm and the ConvNeXt V2 backbone. We explore the composition of various basic blocks and architectures to achieve superior performance and efficient inference for generalizable depth completion. Extensive experiments are conducted on six unseen datasets with various types of sparse depth maps, i.e., randomly sampled 0.1\%/1\%/10\% valid pixels, 4/8/16/32/64-line LiDAR points, and holes from Structured-Light. Our model consistently achieves the best accuracy with faster speed and lower memory when compared to state-of-the-art methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18408v1-abstract-full').style.display = 'none'; document.getElementById('2410.18408v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Major revision in IEEE Transactions on Pattern Analysis and Machine Intelligence</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.18075">arXiv:2410.18075</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.18075">pdf</a>, <a href="https://arxiv.org/format/2410.18075">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> ProFL: Performative Robust Optimal Federated Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xue Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+T">Tian Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+X">Xuwei Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Yener%2C+A">Aylin Yener</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xueru Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Payani%2C+A">Ali Payani</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+M">Myungjin Lee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.18075v1-abstract-short" style="display: inline;"> Performative prediction (PP) is a framework that captures distribution shifts that occur during the training of machine learning models due to their deployment. As the trained model is used, its generated data could cause the model to evolve, leading to deviations from the original data distribution. The impact of such model-induced distribution shifts in the federated learning (FL) setup remains&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18075v1-abstract-full').style.display = 'inline'; document.getElementById('2410.18075v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.18075v1-abstract-full" style="display: none;"> Performative prediction (PP) is a framework that captures distribution shifts that occur during the training of machine learning models due to their deployment. As the trained model is used, its generated data could cause the model to evolve, leading to deviations from the original data distribution. The impact of such model-induced distribution shifts in the federated learning (FL) setup remains unexplored despite being increasingly likely to transpire in real-life use cases. Although Jin et al. (2024) recently extended PP to FL in a straightforward manner, the resulting model only converges to a performative stable point, which may be far from optimal. The methods in Izzo et al. (2021); Miller et al. (2021) can find a performative optimal point in centralized settings, but they require the performative risk to be convex and the training data to be noiseless, assumptions often violated in realistic FL systems. This paper overcomes all of these shortcomings and proposes Performative robust optimal Federated Learning (ProFL), an algorithm that finds performative optimal points in FL from noisy and contaminated data. We present the convergence analysis under the Polyak-Lojasiewicz condition, which applies to non-convex objectives. Extensive experiments on multiple datasets validate our proposed algorithms&#39; efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18075v1-abstract-full').style.display = 'none'; document.getElementById('2410.18075v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 pages with Appendix, 18 figures. The paper has been submitted and is currently under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.13257">arXiv:2410.13257</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.13257">pdf</a>, <a href="https://arxiv.org/format/2410.13257">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> scFusionTTT: Single-cell transcriptomics and proteomics fusion with Test-Time Training layers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Meng%2C+D">Dian Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Xing%2C+B">Bohao Xing</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+X">Xinlei Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yanran Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yijun Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=xiao%2C+Y">Yongjun xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+Z">Zitong Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xubin Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.13257v1-abstract-short" style="display: inline;"> Single-cell multi-omics (scMulti-omics) refers to the paired multimodal data, such as Cellular Indexing of Transcriptomes and Epitopes by Sequencing (CITE-seq), where the regulation of each cell was measured from different modalities, i.e. genes and proteins. scMulti-omics can reveal heterogeneity inside tumors and understand the distinct genetic properties of diverse cell types, which is crucial&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.13257v1-abstract-full').style.display = 'inline'; document.getElementById('2410.13257v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.13257v1-abstract-full" style="display: none;"> Single-cell multi-omics (scMulti-omics) refers to the paired multimodal data, such as Cellular Indexing of Transcriptomes and Epitopes by Sequencing (CITE-seq), where the regulation of each cell was measured from different modalities, i.e. genes and proteins. scMulti-omics can reveal heterogeneity inside tumors and understand the distinct genetic properties of diverse cell types, which is crucial to targeted therapy. Currently, deep learning methods based on attention structures in the bioinformatics area face two challenges. The first challenge is the vast number of genes in a single cell. Traditional attention-based modules struggled to effectively leverage all gene information due to their limited capacity for long-context learning and high-complexity computing. The second challenge is that genes in the human genome are ordered and influence each other&#39;s expression. Most of the methods ignored this sequential information. The recently introduced Test-Time Training (TTT) layer is a novel sequence modeling approach, particularly suitable for handling long contexts like genomics data because TTT layer is a linear complexity sequence modeling structure and is better suited to data with sequential relationships. In this paper, we propose scFusionTTT, a novel method for Single-Cell multimodal omics Fusion with TTT-based masked autoencoder. Of note, we combine the order information of genes and proteins in the human genome with the TTT layer, fuse multimodal omics, and enhance unimodal omics analysis. Finally, the model employs a three-stage training strategy, which yielded the best performance across most metrics in four multimodal omics datasets and four unimodal omics datasets, demonstrating the superior performance of our model. The dataset and code will be available on https://github.com/DM0815/scFusionTTT. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.13257v1-abstract-full').style.display = 'none'; document.getElementById('2410.13257v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.12657">arXiv:2410.12657</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.12657">pdf</a>, <a href="https://arxiv.org/format/2410.12657">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Explanation-Preserving Augmentation for Semi-Supervised Graph Representation Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhuomin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Ni%2C+J">Jingchao Ni</a>, <a href="/search/cs?searchtype=author&amp;query=Salehi%2C+H+A">Hojat Allah Salehi</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Schafir%2C+E">Esteban Schafir</a>, <a href="/search/cs?searchtype=author&amp;query=Shirani%2C+F">Farhad Shirani</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+D">Dongsheng Luo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.12657v1-abstract-short" style="display: inline;"> Graph representation learning (GRL), enhanced by graph augmentation methods, has emerged as an effective technique achieving performance improvements in wide tasks such as node classification and graph classification. In self-supervised GRL, paired graph augmentations are generated from each graph. Its objective is to infer similar representations for augmentations of the same graph, but maximally&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12657v1-abstract-full').style.display = 'inline'; document.getElementById('2410.12657v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.12657v1-abstract-full" style="display: none;"> Graph representation learning (GRL), enhanced by graph augmentation methods, has emerged as an effective technique achieving performance improvements in wide tasks such as node classification and graph classification. In self-supervised GRL, paired graph augmentations are generated from each graph. Its objective is to infer similar representations for augmentations of the same graph, but maximally distinguishable representations for augmentations of different graphs. Analogous to image and language domains, the desiderata of an ideal augmentation method include both (1) semantics-preservation; and (2) data-perturbation; i.e., an augmented graph should preserve the semantics of its original graph while carrying sufficient variance. However, most existing (un-)/self-supervised GRL methods focus on data perturbation but largely neglect semantics preservation. To address this challenge, in this paper, we propose a novel method, Explanation-Preserving Augmentation (EPA), that leverages graph explanation techniques for generating augmented graphs that can bridge the gap between semantics-preservation and data-perturbation. EPA first uses a small number of labels to train a graph explainer to infer the sub-structures (explanations) that are most relevant to a graph&#39;s semantics. These explanations are then used to generate semantics-preserving augmentations for self-supervised GRL, namely EPA-GRL. We demonstrate theoretically, using an analytical example, and through extensive experiments on a variety of benchmark datasets that EPA-GRL outperforms the state-of-the-art (SOTA) GRL methods, which are built upon semantics-agnostic data augmentations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12657v1-abstract-full').style.display = 'none'; document.getElementById('2410.12657v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 7 figures, 7 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11397">arXiv:2410.11397</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.11397">pdf</a>, <a href="https://arxiv.org/format/2410.11397">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> FOOGD: Federated Collaboration for Both Out-of-distribution Generalization and Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liao%2C+X">Xinting Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Weiming Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+P">Pengyang Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+F">Fengyuan Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+J">Jiahe Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Wenjie Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chaochao Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaolin Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11397v2-abstract-short" style="display: inline;"> Federated learning (FL) is a promising machine learning paradigm that collaborates with client models to capture global knowledge. However, deploying FL models in real-world scenarios remains unreliable due to the coexistence of in-distribution data and unexpected out-of-distribution (OOD) data, such as covariate-shift and semantic-shift data. Current FL researches typically address either covaria&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11397v2-abstract-full').style.display = 'inline'; document.getElementById('2410.11397v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11397v2-abstract-full" style="display: none;"> Federated learning (FL) is a promising machine learning paradigm that collaborates with client models to capture global knowledge. However, deploying FL models in real-world scenarios remains unreliable due to the coexistence of in-distribution data and unexpected out-of-distribution (OOD) data, such as covariate-shift and semantic-shift data. Current FL researches typically address either covariate-shift data through OOD generalization or semantic-shift data via OOD detection, overlooking the simultaneous occurrence of various OOD shifts. In this work, we propose FOOGD, a method that estimates the probability density of each client and obtains reliable global distribution as guidance for the subsequent FL process. Firstly, SM3D in FOOGD estimates score model for arbitrary distributions without prior constraints, and detects semantic-shift data powerfully. Then SAG in FOOGD provides invariant yet diverse knowledge for both local covariate-shift generalization and client performance generalization. In empirical validations, FOOGD significantly enjoys three main advantages: (1) reliably estimating non-normalized decentralized distributions, (2) detecting semantic shift data via score values, and (3) generalizing to covariate-shift data by regularizing feature extractor. The prejoct is open in https://github.com/XeniaLLL/FOOGD-main.git. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11397v2-abstract-full').style.display = 'none'; document.getElementById('2410.11397v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.09403">arXiv:2410.09403</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.09403">pdf</a>, <a href="https://arxiv.org/format/2410.09403">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Two Heads Are Better Than One: A Multi-Agent System Has the Potential to Improve Scientific Idea Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Su%2C+H">Haoyang Su</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+R">Renqi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+S">Shixiang Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xinzhe Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">Jingzhe Li</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+Z">Zhenfei Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Ouyang%2C+W">Wanli Ouyang</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+N">Nanqing Dong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.09403v1-abstract-short" style="display: inline;"> The rapid advancement of scientific progress requires innovative tools that can accelerate discovery. While recent AI methods, particularly large language models (LLMs), have shown promise in tasks such as hypothesis generation and experimental design, they fall short in replicating the collaborative nature of real-world scientific practices, where diverse teams of experts work together to tackle&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09403v1-abstract-full').style.display = 'inline'; document.getElementById('2410.09403v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.09403v1-abstract-full" style="display: none;"> The rapid advancement of scientific progress requires innovative tools that can accelerate discovery. While recent AI methods, particularly large language models (LLMs), have shown promise in tasks such as hypothesis generation and experimental design, they fall short in replicating the collaborative nature of real-world scientific practices, where diverse teams of experts work together to tackle complex problems. To address the limitation, we propose an LLM-based multi-agent system, i.e., Virtual Scientists (VirSci), designed to mimic the teamwork inherent in scientific research. VirSci organizes a team of agents to collaboratively generate, evaluate, and refine research ideas. Through comprehensive experiments, we demonstrate that this multi-agent approach outperforms the state-of-the-art method in producing novel and impactful scientific ideas, showing potential in aligning with key insights in the Science of Science field. Our findings suggest that integrating collaborative agents can lead to more innovative scientific outputs, offering a robust system for autonomous scientific discovery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.09403v1-abstract-full').style.display = 'none'; document.getElementById('2410.09403v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.07137">arXiv:2410.07137</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.07137">pdf</a>, <a href="https://arxiv.org/format/2410.07137">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Cheating Automatic LLM Benchmarks: Null Models Achieve High Win Rates </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaosen Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+T">Tianyu Pang</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+C">Chao Du</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Q">Qian Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+J">Jing Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+M">Min Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.07137v1-abstract-short" style="display: inline;"> Automatic LLM benchmarks, such as AlpacaEval 2.0, Arena-Hard-Auto, and MT-Bench, have become popular for evaluating language models due to their cost-effectiveness and scalability compared to human evaluation. Achieving high win rates on these benchmarks can significantly boost the promotional impact of newly released language models. This promotional benefit may motivate tricks, such as manipulat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.07137v1-abstract-full').style.display = 'inline'; document.getElementById('2410.07137v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.07137v1-abstract-full" style="display: none;"> Automatic LLM benchmarks, such as AlpacaEval 2.0, Arena-Hard-Auto, and MT-Bench, have become popular for evaluating language models due to their cost-effectiveness and scalability compared to human evaluation. Achieving high win rates on these benchmarks can significantly boost the promotional impact of newly released language models. This promotional benefit may motivate tricks, such as manipulating model output length or style to game win rates, even though several mechanisms have been developed to control length and disentangle style to reduce gameability. Nonetheless, we show that even a &#34;null model&#34; that always outputs a constant response (irrelevant to input instructions) can cheat automatic benchmarks and achieve top-ranked win rates: an 86.5% LC win rate on AlpacaEval 2.0; an 83.0 score on Arena-Hard-Auto; and a 9.55 score on MT-Bench. Moreover, the crafted cheating outputs are transferable because we assume that the instructions of these benchmarks (e.g., 805 samples of AlpacaEval 2.0) are private and cannot be accessed. While our experiments are primarily proof-of-concept, an adversary could use LLMs to generate more imperceptible cheating responses, unethically benefiting from high win rates and promotional impact. Our findings call for the development of anti-cheating mechanisms for reliable automatic benchmarks. The code is available at https://github.com/sail-sg/Cheating-LLM-Benchmarks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.07137v1-abstract-full').style.display = 'none'; document.getElementById('2410.07137v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.02970">arXiv:2410.02970</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.02970">pdf</a>, <a href="https://arxiv.org/format/2410.02970">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> F-Fidelity: A Robust Framework for Faithfulness Evaluation of Explainable AI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Shirani%2C+F">Farhad Shirani</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhuomin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+C">Chaohao Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+W">Wei Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+W">Wenbo Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+D">Dongsheng Luo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.02970v1-abstract-short" style="display: inline;"> Recent research has developed a number of eXplainable AI (XAI) techniques. Although extracting meaningful insights from deep learning models, how to properly evaluate these XAI methods remains an open problem. The most widely used approach is to perturb or even remove what the XAI method considers to be the most important features in an input and observe the changes in the output prediction. This&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02970v1-abstract-full').style.display = 'inline'; document.getElementById('2410.02970v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.02970v1-abstract-full" style="display: none;"> Recent research has developed a number of eXplainable AI (XAI) techniques. Although extracting meaningful insights from deep learning models, how to properly evaluate these XAI methods remains an open problem. The most widely used approach is to perturb or even remove what the XAI method considers to be the most important features in an input and observe the changes in the output prediction. This approach although efficient suffers the Out-of-Distribution (OOD) problem as the perturbed samples may no longer follow the original data distribution. A recent method RemOve And Retrain (ROAR) solves the OOD issue by retraining the model with perturbed samples guided by explanations. However, the training may not always converge given the distribution difference. Furthermore, using the model retrained based on XAI methods to evaluate these explainers may cause information leakage and thus lead to unfair comparisons. We propose Fine-tuned Fidelity F-Fidelity, a robust evaluation framework for XAI, which utilizes i) an explanation-agnostic fine-tuning strategy, thus mitigating the information leakage issue and ii) a random masking operation that ensures that the removal step does not generate an OOD input. We designed controlled experiments with state-of-the-art (SOTA) explainers and their degraded version to verify the correctness of our framework. We conducted experiments on multiple data structures, such as images, time series, and natural language. The results demonstrate that F-Fidelity significantly improves upon prior evaluation metrics in recovering the ground-truth ranking of the explainers. Furthermore, we show both theoretically and empirically that, given a faithful explainer, F-Fidelity metric can be used to compute the sparsity of influential input components, i.e., to extract the true explanation size. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02970v1-abstract-full').style.display = 'none'; document.getElementById('2410.02970v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint; 26 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.02551">arXiv:2410.02551</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.02551">pdf</a>, <a href="https://arxiv.org/format/2410.02551">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> ColaCare: Enhancing Electronic Health Record Modeling through Large Language Model-Driven Multi-Agent Collaboration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zixiang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+Y">Yinghao Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+H">Huiya Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaochen Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+T">Tianlong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+W">Wen Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yasha Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+C">Chengwei Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Harrison%2C+E+M">Ewen M. Harrison</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+J">Junyi Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+L">Liantao Ma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.02551v1-abstract-short" style="display: inline;"> We introduce ColaCare, a framework that enhances Electronic Health Record (EHR) modeling through multi-agent collaboration driven by Large Language Models (LLMs). Our approach seamlessly integrates domain-specific expert models with LLMs to bridge the gap between structured EHR data and text-based reasoning. Inspired by clinical consultations, ColaCare employs two types of agents: DoctorAgent and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02551v1-abstract-full').style.display = 'inline'; document.getElementById('2410.02551v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.02551v1-abstract-full" style="display: none;"> We introduce ColaCare, a framework that enhances Electronic Health Record (EHR) modeling through multi-agent collaboration driven by Large Language Models (LLMs). Our approach seamlessly integrates domain-specific expert models with LLMs to bridge the gap between structured EHR data and text-based reasoning. Inspired by clinical consultations, ColaCare employs two types of agents: DoctorAgent and MetaAgent, which collaboratively analyze patient data. Expert models process and generate predictions from numerical EHR data, while LLM agents produce reasoning references and decision-making reports within the collaborative consultation framework. We additionally incorporate the Merck Manual of Diagnosis and Therapy (MSD) medical guideline within a retrieval-augmented generation (RAG) module for authoritative evidence support. Extensive experiments conducted on four distinct EHR datasets demonstrate ColaCare&#39;s superior performance in mortality prediction tasks, underscoring its potential to revolutionize clinical decision support systems and advance personalized precision medicine. The code, complete prompt templates, more case studies, etc. are publicly available at the anonymous link: https://colacare.netlify.app. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02551v1-abstract-full').style.display = 'none'; document.getElementById('2410.02551v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.02033">arXiv:2410.02033</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.02033">pdf</a>, <a href="https://arxiv.org/format/2410.02033">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Model Comparisons: XNet Outperforms KAN </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+Z+J">Zhihong Jeff Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaotao Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.02033v1-abstract-short" style="display: inline;"> In the fields of computational mathematics and artificial intelligence, the need for precise data modeling is crucial, especially for predictive machine learning tasks. This paper explores further XNet, a novel algorithm that employs the complex-valued Cauchy integral formula, offering a superior network architecture that surpasses traditional Multi-Layer Perceptrons (MLPs) and Kolmogorov-Arnold N&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02033v1-abstract-full').style.display = 'inline'; document.getElementById('2410.02033v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.02033v1-abstract-full" style="display: none;"> In the fields of computational mathematics and artificial intelligence, the need for precise data modeling is crucial, especially for predictive machine learning tasks. This paper explores further XNet, a novel algorithm that employs the complex-valued Cauchy integral formula, offering a superior network architecture that surpasses traditional Multi-Layer Perceptrons (MLPs) and Kolmogorov-Arnold Networks (KANs). XNet significant improves speed and accuracy across various tasks in both low and high-dimensional spaces, redefining the scope of data-driven model development and providing substantial improvements over established time series models like LSTMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02033v1-abstract-full').style.display = 'none'; document.getElementById('2410.02033v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00068">arXiv:2410.00068</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.00068">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applications">stat.AP</span> </div> </div> <p class="title is-5 mathjax"> Denoising Variational Autoencoder as a Feature Reduction Pipeline for the diagnosis of Autism based on Resting-state fMRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xinyuan Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Ravid%2C+O">Orren Ravid</a>, <a href="/search/cs?searchtype=author&amp;query=Barry%2C+R+A+J">Robert A. J. Barry</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Yoojean Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Q">Qian Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Young-geun Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+X">Xi Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xiaofu He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00068v1-abstract-short" style="display: inline;"> Autism spectrum disorders (ASDs) are developmental conditions characterized by restricted interests and difficulties in communication. The complexity of ASD has resulted in a deficiency of objective diagnostic biomarkers. Deep learning methods have gained recognition for addressing these challenges in neuroimaging analysis, but finding and interpreting such diagnostic biomarkers are still challeng&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00068v1-abstract-full').style.display = 'inline'; document.getElementById('2410.00068v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00068v1-abstract-full" style="display: none;"> Autism spectrum disorders (ASDs) are developmental conditions characterized by restricted interests and difficulties in communication. The complexity of ASD has resulted in a deficiency of objective diagnostic biomarkers. Deep learning methods have gained recognition for addressing these challenges in neuroimaging analysis, but finding and interpreting such diagnostic biomarkers are still challenging computationally. We propose an ASD feature reduction pipeline using resting-state fMRI (rs-fMRI). We used Ncuts parcellations and Power atlas to extract functional connectivity data, resulting in over 30 thousand features. Then the pipeline further compresses the connectivities into 5 latent Gaussian distributions, providing is a low-dimensional representation of the data, using a denoising variational autoencoder (DVAE). To test the method, we employed the extracted latent features from the DVAE to classify ASD using traditional classifiers such as support vector machine (SVM) on a large multi-site dataset. The 95% confidence interval for the prediction accuracy of the SVM is [0.63, 0.76] after site harmonization using the extracted latent distributions. Without using DVAE, the prediction accuracy is 0.70, which falls within the interval. This implies that the model successfully encodes the diagnostic information in rs-fMRI data to 5 Gaussian distributions (10 features) without sacrificing prediction performance. The runtime for training the DVAE and obtaining classification results from its extracted latent features (37 minutes) was 7 times shorter compared to training classifiers directly on the raw connectivity matrices (5-6 hours). Our findings also suggest that the Power atlas provides more effective brain connectivity insights for diagnosing ASD than Ncuts parcellations. The encoded features can be used for the help of diagnosis and interpretation of the disease. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00068v1-abstract-full').style.display = 'none'; document.getElementById('2410.00068v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> J.3; I.4.9; I.4.10 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.20223">arXiv:2409.20223</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.20223">pdf</a>, <a href="https://arxiv.org/format/2409.20223">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> GTransPDM: A Graph-embedded Transformer with Positional Decoupling for Pedestrian Crossing Intention Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xie%2C+C">Chen Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+C">Ciyun Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaoyu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+B">Bowen Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+D">Dayong Wu</a>, <a href="/search/cs?searchtype=author&amp;query=L%C3%B3pez%2C+A+M">Antonio M. L贸pez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.20223v1-abstract-short" style="display: inline;"> Understanding and predicting pedestrian crossing behavioral intention is crucial for autonomous vehicles driving safety. Nonetheless, challenges emerge when using promising images or environmental context masks to extract various factors for time-series network modeling, causing pre-processing errors or a loss in efficiency. Typically, pedestrian positions captured by onboard cameras are often dis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20223v1-abstract-full').style.display = 'inline'; document.getElementById('2409.20223v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.20223v1-abstract-full" style="display: none;"> Understanding and predicting pedestrian crossing behavioral intention is crucial for autonomous vehicles driving safety. Nonetheless, challenges emerge when using promising images or environmental context masks to extract various factors for time-series network modeling, causing pre-processing errors or a loss in efficiency. Typically, pedestrian positions captured by onboard cameras are often distorted and do not accurately reflect their actual movements. To address these issues, GTransPDM -- a Graph-embedded Transformer with a Position Decoupling Module -- was developed for pedestrian crossing intention prediction by leveraging multi-modal features. First, a positional decoupling module was proposed to decompose the pedestrian lateral movement and simulate depth variations in the image view. Then, a graph-embedded Transformer was designed to capture the spatial-temporal dynamics of human pose skeletons, integrating essential factors such as position, skeleton, and ego-vehicle motion. Experimental results indicate that the proposed method achieves 92% accuracy on the PIE dataset and 87% accuracy on the JAAD dataset, with a processing speed of 0.05ms. It outperforms the state-of-the-art in comparison. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20223v1-abstract-full').style.display = 'none'; document.getElementById('2409.20223v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.19818">arXiv:2409.19818</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.19818">pdf</a>, <a href="https://arxiv.org/format/2409.19818">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.21437/Interspeech.2024-1969">10.21437/Interspeech.2024-1969 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fine-Tuning Automatic Speech Recognition for People with Parkinson&#39;s: An Effective Strategy for Enhancing Speech Technology Accessibility </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiuwen Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Phukon%2C+B">Bornali Phukon</a>, <a href="/search/cs?searchtype=author&amp;query=Hasegawa-Johnson%2C+M">Mark Hasegawa-Johnson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.19818v1-abstract-short" style="display: inline;"> This paper enhances dysarthric and dysphonic speech recognition by fine-tuning pretrained automatic speech recognition (ASR) models on the 2023-10-05 data package of the Speech Accessibility Project (SAP), which contains the speech of 253 people with Parkinson&#39;s disease. Experiments tested methods that have been effective for Cerebral Palsy, including the use of speaker clustering and severity-dep&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19818v1-abstract-full').style.display = 'inline'; document.getElementById('2409.19818v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.19818v1-abstract-full" style="display: none;"> This paper enhances dysarthric and dysphonic speech recognition by fine-tuning pretrained automatic speech recognition (ASR) models on the 2023-10-05 data package of the Speech Accessibility Project (SAP), which contains the speech of 253 people with Parkinson&#39;s disease. Experiments tested methods that have been effective for Cerebral Palsy, including the use of speaker clustering and severity-dependent models, weighted fine-tuning, and multi-task learning. Best results were obtained using a multi-task learning model, in which the ASR is trained to produce an estimate of the speaker&#39;s impairment severity as an auxiliary output. The resulting word error rates are considerably improved relative to a baseline model fine-tuned using only Librispeech data, with word error rate improvements of 37.62\% and 26.97\% compared to fine-tuning on 100h and 960h of LibriSpeech data, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19818v1-abstract-full').style.display = 'none'; document.getElementById('2409.19818v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of Interspeech 2024 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16913">arXiv:2409.16913</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.16913">pdf</a>, <a href="https://arxiv.org/format/2409.16913">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Tell Me What You Don&#39;t Know: Enhancing Refusal Capabilities of Role-Playing Agents via Representation Space Analysis and Editing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Wenhao Liu</a>, <a href="/search/cs?searchtype=author&amp;query=An%2C+S">Siyu An</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Junru Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+M">Muling Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+T">Tianlong Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xiaohua Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaoqing Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+D">Di Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xing Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+X">Xuanjing Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16913v1-abstract-short" style="display: inline;"> Role-Playing Agents (RPAs) have shown remarkable performance in various applications, yet they often struggle to recognize and appropriately respond to hard queries that conflict with their role-play knowledge. To investigate RPAs&#39; performance when faced with different types of conflicting requests, we develop an evaluation benchmark that includes contextual knowledge conflicting requests, paramet&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16913v1-abstract-full').style.display = 'inline'; document.getElementById('2409.16913v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16913v1-abstract-full" style="display: none;"> Role-Playing Agents (RPAs) have shown remarkable performance in various applications, yet they often struggle to recognize and appropriately respond to hard queries that conflict with their role-play knowledge. To investigate RPAs&#39; performance when faced with different types of conflicting requests, we develop an evaluation benchmark that includes contextual knowledge conflicting requests, parametric knowledge conflicting requests, and non-conflicting requests to assess RPAs&#39; ability to identify conflicts and refuse to answer appropriately without over-refusing. Through extensive evaluation, we find that most RPAs behave significant performance gaps toward different conflict requests. To elucidate the reasons, we conduct an in-depth representation-level analysis of RPAs under various conflict scenarios. Our findings reveal the existence of rejection regions and direct response regions within the model&#39;s forwarding representation, and thus influence the RPA&#39;s final response behavior. Therefore, we introduce a lightweight representation editing approach that conveniently shifts conflicting requests to the rejection region, thereby enhancing the model&#39;s refusal accuracy. The experimental results validate the effectiveness of our editing method, improving RPAs&#39; refusal ability of conflicting requests while maintaining their general role-playing capabilities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16913v1-abstract-full').style.display = 'none'; document.getElementById('2409.16913v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16694">arXiv:2409.16694</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.16694">pdf</a>, <a href="https://arxiv.org/format/2409.16694">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Survey of Low-bit Large Language Models: Basics, Systems, and Algorithms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gong%2C+R">Ruihao Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+Y">Yifu Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zining Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lv%2C+C">Chengtao Lv</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xingyu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+J">Jinyang Du</a>, <a href="/search/cs?searchtype=author&amp;query=Qin%2C+H">Haotong Qin</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+J">Jinyang Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Magno%2C+M">Michele Magno</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xianglong Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16694v2-abstract-short" style="display: inline;"> Large language models (LLMs) have achieved remarkable advancements in natural language processing, showcasing exceptional performance across various tasks. However, the expensive memory and computational requirements present significant challenges for their practical deployment. Low-bit quantization has emerged as a critical approach to mitigate these challenges by reducing the bit-width of model&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16694v2-abstract-full').style.display = 'inline'; document.getElementById('2409.16694v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16694v2-abstract-full" style="display: none;"> Large language models (LLMs) have achieved remarkable advancements in natural language processing, showcasing exceptional performance across various tasks. However, the expensive memory and computational requirements present significant challenges for their practical deployment. Low-bit quantization has emerged as a critical approach to mitigate these challenges by reducing the bit-width of model parameters, activations, and gradients, thus decreasing memory usage and computational demands. This paper presents a comprehensive survey of low-bit quantization methods tailored for LLMs, covering the fundamental principles, system implementations, and algorithmic strategies. An overview of basic concepts and new data formats specific to low-bit LLMs is first introduced, followed by a review of frameworks and systems that facilitate low-bit LLMs across various hardware platforms. Then, we categorize and analyze techniques and toolkits for efficient low-bit training and inference of LLMs. Finally, we conclude with a discussion of future trends and potential advancements of low-bit LLMs. Our systematic overview from basic, system, and algorithm perspectives can offer valuable insights and guidelines for future works to enhance the efficiency and applicability of LLMs through low-bit quantization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16694v2-abstract-full').style.display = 'none'; document.getElementById('2409.16694v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Ruihao Gong leads the overall organization of the survey, with Yifu Ding and Jinyang Du contributing to Sections 2 and 3. Xingyu Zheng is responsible for authoring Section 4, while Chengtao Lv and Zining Wang collaborate on Section 5. Haotong Qin, Jinyang Guo, Michele Magno, and Xianglong Liu provide guidance during the whole process and assist in refining the final manuscript</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.14652">arXiv:2409.14652</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.14652">pdf</a>, <a href="https://arxiv.org/format/2409.14652">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> AEANet: Affinity Enhanced Attentional Networks for Arbitrary Style Transfer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Gen Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xianqiu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yujian Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.14652v2-abstract-short" style="display: inline;"> Arbitrary artistic style transfer is a research area that combines rational academic study with emotive artistic creation. It aims to create a new image from a content image according to a target artistic style, maintaining the content&#39;s textural structural information while incorporating the artistic characteristics of the style image. However, existing style transfer methods often significantly&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14652v2-abstract-full').style.display = 'inline'; document.getElementById('2409.14652v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.14652v2-abstract-full" style="display: none;"> Arbitrary artistic style transfer is a research area that combines rational academic study with emotive artistic creation. It aims to create a new image from a content image according to a target artistic style, maintaining the content&#39;s textural structural information while incorporating the artistic characteristics of the style image. However, existing style transfer methods often significantly damage the texture lines of the content image during the style transformation. To address these issues, we propose affinity-enhanced attentional network, which include the content affinity-enhanced attention (CAEA) module, the style affinity-enhanced attention (SAEA) module, and the hybrid attention (HA) module. The CAEA and SAEA modules first use attention to enhance content and style representations, followed by a detail enhanced (DE) module to reinforce detail features. The hybrid attention module adjusts the style feature distribution based on the content feature distribution. We also introduce the local dissimilarity loss based on affinity attention, which better preserves the affinity with content and style images. Experiments demonstrate that our work achieves better results in arbitrary style transfer than other state-of-the-art methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.14652v2-abstract-full').style.display = 'none'; document.getElementById('2409.14652v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 5 figures,1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.12778">arXiv:2409.12778</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.12778">pdf</a>, <a href="https://arxiv.org/format/2409.12778">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> EventDance++: Language-guided Unsupervised Source-free Cross-modal Adaptation for Event-based Object Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Lin Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.12778v2-abstract-short" style="display: inline;"> In this paper, we address the challenging problem of cross-modal (image-to-events) adaptation for event-based recognition without accessing any labeled source image data. This task is arduous due to the substantial modality gap between images and events. With only a pre-trained source model available, the key challenge lies in extracting knowledge from this model and effectively transferring knowl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.12778v2-abstract-full').style.display = 'inline'; document.getElementById('2409.12778v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.12778v2-abstract-full" style="display: none;"> In this paper, we address the challenging problem of cross-modal (image-to-events) adaptation for event-based recognition without accessing any labeled source image data. This task is arduous due to the substantial modality gap between images and events. With only a pre-trained source model available, the key challenge lies in extracting knowledge from this model and effectively transferring knowledge to the event-based domain. Inspired by the natural ability of language to convey semantics across different modalities, we propose EventDance++, a novel framework that tackles this unsupervised source-free cross-modal adaptation problem from a language-guided perspective. We introduce a language-guided reconstruction-based modality bridging (L-RMB) module, which reconstructs intensity frames from events in a self-supervised manner. Importantly, it leverages a vision-language model to provide further supervision, enriching the surrogate images and enhancing modality bridging. This enables the creation of surrogate images to extract knowledge (i.e., labels) from the source model. On top, we propose a multi-representation knowledge adaptation (MKA) module to transfer knowledge to target models, utilizing multiple event representations to capture the spatiotemporal characteristics of events fully. The L-RMB and MKA modules are jointly optimized to achieve optimal performance in bridging the modality gap. Experiments on three benchmark datasets demonstrate that EventDance++ performs on par with methods that utilize source data, validating the effectiveness of our language-guided approach in event-based recognition. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.12778v2-abstract-full').style.display = 'none'; document.getElementById('2409.12778v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2403.14082</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.12728">arXiv:2409.12728</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.12728">pdf</a>, <a href="https://arxiv.org/format/2409.12728">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Genomics">q-bio.GN</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> PRAGA: Prototype-aware Graph Adaptive Aggregation for Spatial Multi-modal Omics Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+X">Xinlei Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+Z">Zhiqi Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Meng%2C+D">Dian Meng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yanran Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Ruan%2C+S">Shiwei Ruan</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Q">Qingqiang Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xubin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+Z">Ziyue Qiao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.12728v3-abstract-short" style="display: inline;"> Spatial multi-modal omics technology, highlighted by Nature Methods as an advanced biological technique in 2023, plays a critical role in resolving biological regulatory processes with spatial context. Recently, graph neural networks based on K-nearest neighbor (KNN) graphs have gained prominence in spatial multi-modal omics methods due to their ability to model semantic relations between sequenci&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.12728v3-abstract-full').style.display = 'inline'; document.getElementById('2409.12728v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.12728v3-abstract-full" style="display: none;"> Spatial multi-modal omics technology, highlighted by Nature Methods as an advanced biological technique in 2023, plays a critical role in resolving biological regulatory processes with spatial context. Recently, graph neural networks based on K-nearest neighbor (KNN) graphs have gained prominence in spatial multi-modal omics methods due to their ability to model semantic relations between sequencing spots. However, the fixed KNN graph fails to capture the latent semantic relations hidden by the inevitable data perturbations during the biological sequencing process, resulting in the loss of semantic information. In addition, the common lack of spot annotation and class number priors in practice further hinders the optimization of spatial multi-modal omics models. Here, we propose a novel spatial multi-modal omics resolved framework, termed PRototype-Aware Graph Adaptative Aggregation for Spatial Multi-modal Omics Analysis (PRAGA). PRAGA constructs a dynamic graph to capture latent semantic relations and comprehensively integrate spatial information and feature semantics. The learnable graph structure can also denoise perturbations by learning cross-modal knowledge. Moreover, a dynamic prototype contrastive learning is proposed based on the dynamic adaptability of Bayesian Gaussian Mixture Models to optimize the multi-modal omics representations for unknown biological priors. Quantitative and qualitative experiments on simulated and real datasets with 7 competing methods demonstrate the superior performance of PRAGA. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.12728v3-abstract-full').style.display = 'none'; document.getElementById('2409.12728v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.10064">arXiv:2409.10064</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.10064">pdf</a>, <a href="https://arxiv.org/format/2409.10064">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> MindGuard: Towards Accessible and Sitgma-free Mental Health First Aid via Edge LLM </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ji%2C+S">Sijie Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xinzhe Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+J">Jiawei Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+R">Renqi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+W">Wei Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Srivastava%2C+M">Mani Srivastava</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.10064v1-abstract-short" style="display: inline;"> Mental health disorders are among the most prevalent diseases worldwide, affecting nearly one in four people. Despite their widespread impact, the intervention rate remains below 25%, largely due to the significant cooperation required from patients for both diagnosis and intervention. The core issue behind this low treatment rate is stigma, which discourages over half of those affected from seeki&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.10064v1-abstract-full').style.display = 'inline'; document.getElementById('2409.10064v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.10064v1-abstract-full" style="display: none;"> Mental health disorders are among the most prevalent diseases worldwide, affecting nearly one in four people. Despite their widespread impact, the intervention rate remains below 25%, largely due to the significant cooperation required from patients for both diagnosis and intervention. The core issue behind this low treatment rate is stigma, which discourages over half of those affected from seeking help. This paper presents MindGuard, an accessible, stigma-free, and professional mobile mental healthcare system designed to provide mental health first aid. The heart of MindGuard is an innovative edge LLM, equipped with professional mental health knowledge, that seamlessly integrates objective mobile sensor data with subjective Ecological Momentary Assessment records to deliver personalized screening and intervention conversations. We conduct a broad evaluation of MindGuard using open datasets spanning four years and real-world deployment across various mobile devices involving 20 subjects for two weeks. Remarkably, MindGuard achieves results comparable to GPT-4 and outperforms its counterpart with more than 10 times the model size. We believe that MindGuard paves the way for mobile LLM applications, potentially revolutionizing mental healthcare practices by substituting self-reporting and intervention conversations with passive, integrated monitoring within daily life, thus ensuring accessible and stigma-free mental health support. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.10064v1-abstract-full').style.display = 'none'; document.getElementById('2409.10064v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.09356">arXiv:2409.09356</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.09356">pdf</a>, <a href="https://arxiv.org/format/2409.09356">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3691620.3695262">10.1145/3691620.3695262 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Towards Robust Detection of Open Source Software Supply Chain Poisoning Attacks in Industry Environments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xinyi Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+C">Chen Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shenao Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yanjie Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+P">Peiming Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuanchao Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+K">Kailong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haoyu Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.09356v1-abstract-short" style="display: inline;"> The exponential growth of open-source package ecosystems, particularly NPM and PyPI, has led to an alarming increase in software supply chain poisoning attacks. Existing static analysis methods struggle with high false positive rates and are easily thwarted by obfuscation and dynamic code execution techniques. While dynamic analysis approaches offer improvements, they often suffer from capturing n&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09356v1-abstract-full').style.display = 'inline'; document.getElementById('2409.09356v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.09356v1-abstract-full" style="display: none;"> The exponential growth of open-source package ecosystems, particularly NPM and PyPI, has led to an alarming increase in software supply chain poisoning attacks. Existing static analysis methods struggle with high false positive rates and are easily thwarted by obfuscation and dynamic code execution techniques. While dynamic analysis approaches offer improvements, they often suffer from capturing non-package behaviors and employing simplistic testing strategies that fail to trigger sophisticated malicious behaviors. To address these challenges, we present OSCAR, a robust dynamic code poisoning detection pipeline for NPM and PyPI ecosystems. OSCAR fully executes packages in a sandbox environment, employs fuzz testing on exported functions and classes, and implements aspect-based behavior monitoring with tailored API hook points. We evaluate OSCAR against six existing tools using a comprehensive benchmark dataset of real-world malicious and benign packages. OSCAR achieves an F1 score of 0.95 in NPM and 0.91 in PyPI, confirming that OSCAR is as effective as the current state-of-the-art technologies. Furthermore, for benign packages exhibiting characteristics typical of malicious packages, OSCAR reduces the false positive rate by an average of 32.06% in NPM (from 34.63% to 2.57%) and 39.87% in PyPI (from 41.10% to 1.23%), compared to other tools, significantly reducing the workload of manual reviews in real-world deployments. In cooperation with Ant Group, a leading financial technology company, we have deployed OSCAR on its NPM and PyPI mirrors since January 2023, identifying 10,404 malicious NPM packages and 1,235 malicious PyPI packages over 18 months. This work not only bridges the gap between academic research and industrial application in code poisoning detection but also provides a robust and practical solution that has been thoroughly tested in a real-world industrial setting. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09356v1-abstract-full').style.display = 'none'; document.getElementById('2409.09356v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in the 39th IEEE/ACM International Conference on Automated Software Engineering (ASE&#39;24 Industry Showcase), October 27-November 1, 2024, Sacramento, CA, USA</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.07694">arXiv:2409.07694</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.07694">pdf</a>, <a href="https://arxiv.org/format/2409.07694">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Learn from Balance: Rectifying Knowledge Transfer for Long-Tailed Scenarios </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+X">Xinlei Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+J">Jialiang Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xubin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+J">Jinjia Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+W">Wenxin Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+N">Ning Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.07694v2-abstract-short" style="display: inline;"> Knowledge Distillation (KD) transfers knowledge from a large pre-trained teacher network to a compact and efficient student network, making it suitable for deployment on resource-limited media terminals. However, traditional KD methods require balanced data to ensure robust training, which is often unavailable in practical applications. In such scenarios, a few head categories occupy a substantial&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07694v2-abstract-full').style.display = 'inline'; document.getElementById('2409.07694v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.07694v2-abstract-full" style="display: none;"> Knowledge Distillation (KD) transfers knowledge from a large pre-trained teacher network to a compact and efficient student network, making it suitable for deployment on resource-limited media terminals. However, traditional KD methods require balanced data to ensure robust training, which is often unavailable in practical applications. In such scenarios, a few head categories occupy a substantial proportion of examples. This imbalance biases the trained teacher network towards the head categories, resulting in severe performance degradation on the less represented tail categories for both the teacher and student networks. In this paper, we propose a novel framework called Knowledge Rectification Distillation (KRDistill) to address the imbalanced knowledge inherited in the teacher network through the incorporation of the balanced category priors. Furthermore, we rectify the biased predictions produced by the teacher network, particularly focusing on the tail categories. Consequently, the teacher network can provide balanced and accurate knowledge to train a reliable student network. Intensive experiments conducted on various long-tailed datasets demonstrate that our KRDistill can effectively train reliable student networks in realistic scenarios of data imbalance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07694v2-abstract-full').style.display = 'none'; document.getElementById('2409.07694v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.07475">arXiv:2409.07475</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.07475">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Cross-Cultural Communication in the Digital Age: An Analysis of Cultural Representation and Inclusivity in Emojis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Lingfeng Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiangwen Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.07475v1-abstract-short" style="display: inline;"> Emojis have become a universal language in the digital world, enabling users to express emotions, ideas, and identities across diverse cultural contexts. As emojis incorporate more cultural symbols and diverse representations, they play a crucial role in cross-cultural communication. This research project aims to analyze the representation of different cultures in emojis, investigate how emojis fa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07475v1-abstract-full').style.display = 'inline'; document.getElementById('2409.07475v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.07475v1-abstract-full" style="display: none;"> Emojis have become a universal language in the digital world, enabling users to express emotions, ideas, and identities across diverse cultural contexts. As emojis incorporate more cultural symbols and diverse representations, they play a crucial role in cross-cultural communication. This research project aims to analyze the representation of different cultures in emojis, investigate how emojis facilitate cross-cultural communication and promote inclusivity, and explore the impact of emojis on understanding and interpretation in different cultural contexts. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07475v1-abstract-full').style.display = 'none'; document.getElementById('2409.07475v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 7 figures, 2 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.07016">arXiv:2409.07016</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.07016">pdf</a>, <a href="https://arxiv.org/format/2409.07016">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Improving Anomalous Sound Detection via Low-Rank Adaptation Fine-Tuning of Pre-Trained Audio Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xinhu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+A">Anbai Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+B">Bing Han</a>, <a href="/search/cs?searchtype=author&amp;query=Qian%2C+Y">Yanmin Qian</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+P">Pingyi Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jia Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Wei-Qiang Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.07016v1-abstract-short" style="display: inline;"> Anomalous Sound Detection (ASD) has gained significant interest through the application of various Artificial Intelligence (AI) technologies in industrial settings. Though possessing great potential, ASD systems can hardly be readily deployed in real production sites due to the generalization problem, which is primarily caused by the difficulty of data collection and the complexity of environmenta&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07016v1-abstract-full').style.display = 'inline'; document.getElementById('2409.07016v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.07016v1-abstract-full" style="display: none;"> Anomalous Sound Detection (ASD) has gained significant interest through the application of various Artificial Intelligence (AI) technologies in industrial settings. Though possessing great potential, ASD systems can hardly be readily deployed in real production sites due to the generalization problem, which is primarily caused by the difficulty of data collection and the complexity of environmental factors. This paper introduces a robust ASD model that leverages audio pre-trained models. Specifically, we fine-tune these models using machine operation data, employing SpecAug as a data augmentation strategy. Additionally, we investigate the impact of utilizing Low-Rank Adaptation (LoRA) tuning instead of full fine-tuning to address the problem of limited data for fine-tuning. Our experiments on the DCASE2023 Task 2 dataset establish a new benchmark of 77.75% on the evaluation set, with a significant improvement of 6.48% compared with previous state-of-the-art (SOTA) models, including top-tier traditional convolutional networks and speech pre-trained models, which demonstrates the effectiveness of audio pre-trained models with LoRA tuning. Ablation studies are also conducted to showcase the efficacy of the proposed scheme. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.07016v1-abstract-full').style.display = 'none'; document.getElementById('2409.07016v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.01781">arXiv:2409.01781</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.01781">pdf</a>, <a href="https://arxiv.org/format/2409.01781">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Dual Advancement of Representation Learning and Clustering for Sparse and Noisy Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+W">Wenlin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yucheng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaoqing Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+S">Suoya Han</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xiaobo Sun</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.01781v2-abstract-short" style="display: inline;"> Sparse and noisy images (SNIs), like those in spatial gene expression data, pose significant challenges for effective representation learning and clustering, which are essential for thorough data analysis and interpretation. In response to these challenges, we propose Dual Advancement of Representation Learning and Clustering (DARLC), an innovative framework that leverages contrastive learning to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.01781v2-abstract-full').style.display = 'inline'; document.getElementById('2409.01781v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.01781v2-abstract-full" style="display: none;"> Sparse and noisy images (SNIs), like those in spatial gene expression data, pose significant challenges for effective representation learning and clustering, which are essential for thorough data analysis and interpretation. In response to these challenges, we propose Dual Advancement of Representation Learning and Clustering (DARLC), an innovative framework that leverages contrastive learning to enhance the representations derived from masked image modeling. Simultaneously, DARLC integrates cluster assignments in a cohesive, end-to-end approach. This integrated clustering strategy addresses the &#34;class collision problem&#34; inherent in contrastive learning, thus improving the quality of the resulting representations. To generate more plausible positive views for contrastive learning, we employ a graph attention network-based technique that produces denoised images as augmented data. As such, our framework offers a comprehensive approach that improves the learning of representations by enhancing their local perceptibility, distinctiveness, and the understanding of relational semantics. Furthermore, we utilize a Student&#39;s t mixture model to achieve more robust and adaptable clustering of SNIs. Extensive experiments, conducted across 12 different types of datasets consisting of SNIs, demonstrate that DARLC surpasses the state-of-the-art methods in both image clustering and generating image representations that accurately capture gene interactions. Code is available at https://github.com/zipging/DARLC. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.01781v2-abstract-full').style.display = 'none'; document.getElementById('2409.01781v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.00695">arXiv:2409.00695</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.00695">pdf</a>, <a href="https://arxiv.org/format/2409.00695">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Curriculum Prompting Foundation Models for Medical Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiuqi Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuhang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Haoran Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+H">Hongrui Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+X">Xueqi Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhuqing Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Lao%2C+Q">Qicheng Lao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.00695v1-abstract-short" style="display: inline;"> Adapting large pre-trained foundation models, e.g., SAM, for medical image segmentation remains a significant challenge. A crucial step involves the formulation of a series of specialized prompts that incorporate specific clinical instructions. Past works have been heavily reliant on a singular type of prompt for each instance, necessitating manual input of an ideally correct prompt, which is less&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.00695v1-abstract-full').style.display = 'inline'; document.getElementById('2409.00695v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.00695v1-abstract-full" style="display: none;"> Adapting large pre-trained foundation models, e.g., SAM, for medical image segmentation remains a significant challenge. A crucial step involves the formulation of a series of specialized prompts that incorporate specific clinical instructions. Past works have been heavily reliant on a singular type of prompt for each instance, necessitating manual input of an ideally correct prompt, which is less efficient. To tackle this issue, we propose to utilize prompts of different granularity, which are sourced from original images to provide a broader scope of clinical insights. However, combining prompts of varying types can pose a challenge due to potential conflicts. In response, we have designed a coarse-to-fine mechanism, referred to as curriculum prompting, that progressively integrates prompts of different types. Through extensive experiments on three public medical datasets across various modalities, we demonstrate the effectiveness of our proposed approach, which not only automates the prompt generation process but also yields superior performance compared to other SAM-based medical image segmentation methods. Code is available at: https://github.com/AnnaZzz-zxq/Curriculum-Prompting. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.00695v1-abstract-full').style.display = 'none'; document.getElementById('2409.00695v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by MICCAI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.16326">arXiv:2408.16326</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.16326">pdf</a>, <a href="https://arxiv.org/format/2408.16326">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Critic-CoT: Boosting the reasoning abilities of large language model via Chain-of-thoughts Critic </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xin Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Lou%2C+J">Jie Lou</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+B">Boxi Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+X">Xueru Wen</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+Y">Yuqiu Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Hongyu Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+Y">Yaojie Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+X">Xianpei Han</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Debing Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+L">Le Sun</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.16326v2-abstract-short" style="display: inline;"> Self-critic has become a crucial mechanism for enhancing the reasoning performance of LLMs. However, current approaches mainly involve basic prompts for intuitive instance-level feedback, which resembles System-1 processes and limits the reasoning capabilities. Moreover, there is a lack of in-depth investigations into the relationship between LLM&#39;s ability to criticize and its task-solving perform&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.16326v2-abstract-full').style.display = 'inline'; document.getElementById('2408.16326v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.16326v2-abstract-full" style="display: none;"> Self-critic has become a crucial mechanism for enhancing the reasoning performance of LLMs. However, current approaches mainly involve basic prompts for intuitive instance-level feedback, which resembles System-1 processes and limits the reasoning capabilities. Moreover, there is a lack of in-depth investigations into the relationship between LLM&#39;s ability to criticize and its task-solving performance. To address these issues, we propose Critic-CoT, a novel framework that pushes LLMs toward System-2-like critic capability. Through a step-wise CoT reasoning paradigm and the automatic construction of distant-supervision data without human annotation, Critic-CoT enables LLMs to engage in slow, analytic self-critique and refinement, thereby improving their reasoning abilities. Experiments on GSM8K and MATH demonstrate that our enhanced model significantly boosts task-solving performance by filtering out invalid solutions or iterative refinement. Furthermore, we investigate the intrinsic correlation between critique and task-solving abilities within LLMs, discovering that these abilities can mutually reinforce each other rather than conflict. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.16326v2-abstract-full').style.display = 'none'; document.getElementById('2408.16326v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.14732">arXiv:2408.14732</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.14732">pdf</a>, <a href="https://arxiv.org/format/2408.14732">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Graphics">cs.GR</span> </div> </div> <p class="title is-5 mathjax"> OctFusion: Octree-based Diffusion Models for 3D Shape Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+B">Bojun Xiong</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+S">Si-Tong Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xin-Yang Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+Y">Yan-Pei Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Lian%2C+Z">Zhouhui Lian</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+P">Peng-Shuai Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.14732v1-abstract-short" style="display: inline;"> Diffusion models have emerged as a popular method for 3D generation. However, it is still challenging for diffusion models to efficiently generate diverse and high-quality 3D shapes. In this paper, we introduce OctFusion, which can generate 3D shapes with arbitrary resolutions in 2.5 seconds on a single Nvidia 4090 GPU, and the extracted meshes are guaranteed to be continuous and manifold. The key&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.14732v1-abstract-full').style.display = 'inline'; document.getElementById('2408.14732v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.14732v1-abstract-full" style="display: none;"> Diffusion models have emerged as a popular method for 3D generation. However, it is still challenging for diffusion models to efficiently generate diverse and high-quality 3D shapes. In this paper, we introduce OctFusion, which can generate 3D shapes with arbitrary resolutions in 2.5 seconds on a single Nvidia 4090 GPU, and the extracted meshes are guaranteed to be continuous and manifold. The key components of OctFusion are the octree-based latent representation and the accompanying diffusion models. The representation combines the benefits of both implicit neural representations and explicit spatial octrees and is learned with an octree-based variational autoencoder. The proposed diffusion model is a unified multi-scale U-Net that enables weights and computation sharing across different octree levels and avoids the complexity of widely used cascaded diffusion schemes. We verify the effectiveness of OctFusion on the ShapeNet and Objaverse datasets and achieve state-of-the-art performances on shape generation tasks. We demonstrate that OctFusion is extendable and flexible by generating high-quality color fields for textured mesh generation and high-quality 3D shapes conditioned on text prompts, sketches, or category labels. Our code and pre-trained models are available at \url{https://github.com/octree-nn/octfusion}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.14732v1-abstract-full').style.display = 'none'; document.getElementById('2408.14732v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Technical Report</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.13461">arXiv:2408.13461</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.13461">pdf</a>, <a href="https://arxiv.org/format/2408.13461">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Probing the Robustness of Vision-Language Pretrained Models: A Multimodal Adversarial Attack Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Guan%2C+J">Jiwei Guan</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+T">Tianyu Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+L">Longbing Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+L">Lei Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xi Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.13461v1-abstract-short" style="display: inline;"> Vision-language pretraining (VLP) with transformers has demonstrated exceptional performance across numerous multimodal tasks. However, the adversarial robustness of these models has not been thoroughly investigated. Existing multimodal attack methods have largely overlooked cross-modal interactions between visual and textual modalities, particularly in the context of cross-attention mechanisms. I&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.13461v1-abstract-full').style.display = 'inline'; document.getElementById('2408.13461v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.13461v1-abstract-full" style="display: none;"> Vision-language pretraining (VLP) with transformers has demonstrated exceptional performance across numerous multimodal tasks. However, the adversarial robustness of these models has not been thoroughly investigated. Existing multimodal attack methods have largely overlooked cross-modal interactions between visual and textual modalities, particularly in the context of cross-attention mechanisms. In this paper, we study the adversarial vulnerability of recent VLP transformers and design a novel Joint Multimodal Transformer Feature Attack (JMTFA) that concurrently introduces adversarial perturbations in both visual and textual modalities under white-box settings. JMTFA strategically targets attention relevance scores to disrupt important features within each modality, generating adversarial samples by fusing perturbations and leading to erroneous model predictions. Experimental results indicate that the proposed approach achieves high attack success rates on vision-language understanding and reasoning downstream tasks compared to existing baselines. Notably, our findings reveal that the textual modality significantly influences the complex fusion processes within VLP transformers. Moreover, we observe no apparent relationship between model size and adversarial robustness under our proposed attacks. These insights emphasize a new dimension of adversarial robustness and underscore potential risks in the reliable deployment of multimodal AI systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.13461v1-abstract-full').style.display = 'none'; document.getElementById('2408.13461v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.12829">arXiv:2408.12829</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.12829">pdf</a>, <a href="https://arxiv.org/format/2408.12829">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Uncertainty-Aware Mean Opinion Score Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Hui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+S">Shiwan Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+J">Jiaming Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiguang Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+H">Haoqin Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xuechen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Qin%2C+Y">Yong Qin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.12829v1-abstract-short" style="display: inline;"> Mean Opinion Score (MOS) prediction has made significant progress in specific domains. However, the unstable performance of MOS prediction models across diverse samples presents ongoing challenges in the practical application of these systems. In this paper, we point out that the absence of uncertainty modeling is a significant limitation hindering MOS prediction systems from applying to the real&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12829v1-abstract-full').style.display = 'inline'; document.getElementById('2408.12829v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.12829v1-abstract-full" style="display: none;"> Mean Opinion Score (MOS) prediction has made significant progress in specific domains. However, the unstable performance of MOS prediction models across diverse samples presents ongoing challenges in the practical application of these systems. In this paper, we point out that the absence of uncertainty modeling is a significant limitation hindering MOS prediction systems from applying to the real and open world. We analyze the sources of uncertainty in the MOS prediction task and propose to establish an uncertainty-aware MOS prediction system that models aleatory uncertainty and epistemic uncertainty by heteroscedastic regression and Monte Carlo dropout separately. The experimental results show that the system captures uncertainty well and is capable of performing selective prediction and out-of-domain detection. Such capabilities significantly enhance the practical utility of MOS systems in diverse real and open-world environments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12829v1-abstract-full').style.display = 'none'; document.getElementById('2408.12829v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by Interspeech 2024, oral</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.10614">arXiv:2408.10614</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.10614">pdf</a>, <a href="https://arxiv.org/format/2408.10614">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Generalizable Facial Expression Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuhang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiuqi Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+C">Chenyi Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+J">Jiani Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weihong Deng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.10614v1-abstract-short" style="display: inline;"> SOTA facial expression recognition (FER) methods fail on test sets that have domain gaps with the train set. Recent domain adaptation FER methods need to acquire labeled or unlabeled samples of target domains to fine-tune the FER model, which might be infeasible in real-world deployment. In this paper, we aim to improve the zero-shot generalization ability of FER methods on different unseen test s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10614v1-abstract-full').style.display = 'inline'; document.getElementById('2408.10614v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.10614v1-abstract-full" style="display: none;"> SOTA facial expression recognition (FER) methods fail on test sets that have domain gaps with the train set. Recent domain adaptation FER methods need to acquire labeled or unlabeled samples of target domains to fine-tune the FER model, which might be infeasible in real-world deployment. In this paper, we aim to improve the zero-shot generalization ability of FER methods on different unseen test sets using only one train set. Inspired by how humans first detect faces and then select expression features, we propose a novel FER pipeline to extract expression-related features from any given face images. Our method is based on the generalizable face features extracted by large models like CLIP. However, it is non-trivial to adapt the general features of CLIP for specific tasks like FER. To preserve the generalization ability of CLIP and the high precision of the FER model, we design a novel approach that learns sigmoid masks based on the fixed CLIP face features to extract expression features. To further improve the generalization ability on unseen test sets, we separate the channels of the learned masked features according to the expression classes to directly generate logits and avoid using the FC layer to reduce overfitting. We also introduce a channel-diverse loss to make the learned masks separated. Extensive experiments on five different FER datasets verify that our method outperforms SOTA FER methods by large margins. Code is available in https://github.com/zyh-uaiaaaa/Generalizable-FER. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10614v1-abstract-full').style.display = 'none'; document.getElementById('2408.10614v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ECCV2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.10235">arXiv:2408.10235</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.10235">pdf</a>, <a href="https://arxiv.org/format/2408.10235">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Multi-Source EEG Emotion Recognition via Dynamic Contrastive Domain Adaptation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+Y">Yun Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yimeng Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+X">Xiaopeng Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+S">Shuzheng Han</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xia Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+D">Dingyi Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xiaojiang Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.10235v1-abstract-short" style="display: inline;"> Electroencephalography (EEG) provides reliable indications of human cognition and mental states. Accurate emotion recognition from EEG remains challenging due to signal variations among individuals and across measurement sessions. To address these challenges, we introduce a multi-source dynamic contrastive domain adaptation method (MS-DCDA), which models coarse-grained inter-domain and fine-graine&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10235v1-abstract-full').style.display = 'inline'; document.getElementById('2408.10235v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.10235v1-abstract-full" style="display: none;"> Electroencephalography (EEG) provides reliable indications of human cognition and mental states. Accurate emotion recognition from EEG remains challenging due to signal variations among individuals and across measurement sessions. To address these challenges, we introduce a multi-source dynamic contrastive domain adaptation method (MS-DCDA), which models coarse-grained inter-domain and fine-grained intra-class adaptations through a multi-branch contrastive neural network and contrastive sub-domain discrepancy learning. Our model leverages domain knowledge from each individual source and a complementary source ensemble and uses dynamically weighted learning to achieve an optimal tradeoff between domain transferability and discriminability. The proposed MS-DCDA model was evaluated using the SEED and SEED-IV datasets, achieving respectively the highest mean accuracies of $90.84\%$ and $78.49\%$ in cross-subject experiments as well as $95.82\%$ and $82.25\%$ in cross-session experiments. Our model outperforms several alternative domain adaptation methods in recognition accuracy, inter-class margin, and intra-class compactness. Our study also suggests greater emotional sensitivity in the frontal and parietal brain lobes, providing insights for mental health interventions, personalized medicine, and development of preventive strategies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10235v1-abstract-full').style.display = 'none'; document.getElementById('2408.10235v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.09115">arXiv:2408.09115</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.09115">pdf</a>, <a href="https://arxiv.org/format/2408.09115">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> GoodSAM++: Bridging Domain and Capacity Gaps via Segment Anything Model for Panoramic Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Weiming Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yexin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Lin Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.09115v1-abstract-short" style="display: inline;"> This paper presents GoodSAM++, a novel framework utilizing the powerful zero-shot instance segmentation capability of SAM (i.e., teacher) to learn a compact panoramic semantic segmentation model, i.e., student, without requiring any labeled data. GoodSAM++ addresses two critical challenges: 1) SAM&#39;s inability to provide semantic labels and inherent distortion problems of panoramic images; 2) the s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.09115v1-abstract-full').style.display = 'inline'; document.getElementById('2408.09115v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.09115v1-abstract-full" style="display: none;"> This paper presents GoodSAM++, a novel framework utilizing the powerful zero-shot instance segmentation capability of SAM (i.e., teacher) to learn a compact panoramic semantic segmentation model, i.e., student, without requiring any labeled data. GoodSAM++ addresses two critical challenges: 1) SAM&#39;s inability to provide semantic labels and inherent distortion problems of panoramic images; 2) the significant capacity disparity between SAM and the student. The `out-of-the-box&#39; insight of GoodSAM++ is to introduce a teacher assistant (TA) to provide semantic information for SAM, integrated with SAM to obtain reliable pseudo semantic maps to bridge both domain and capacity gaps. To make this possible, we first propose a Distortion-Aware Rectification (DARv2) module to address the domain gap. It effectively mitigates the object deformation and distortion problem in panoramic images to obtain pseudo semantic maps. We then introduce a Multi-level Knowledge Adaptation (MKA) module to efficiently transfer the semantic information from the TA and pseudo semantic maps to our compact student model, addressing the significant capacity gap. We conduct extensive experiments on both outdoor and indoor benchmark datasets, showing that our GoodSAM++ achieves a remarkable performance improvement over the state-of-the-art (SOTA) domain adaptation methods. Moreover, diverse open-world scenarios demonstrate the generalization capacity of our GoodSAM++. Last but not least, our most lightweight student model achieves comparable performance to the SOTA models with only 3.7 million parameters. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.09115v1-abstract-full').style.display = 'none'; document.getElementById('2408.09115v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, under review. arXiv admin note: substantial text overlap with arXiv:2403.16370</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.06300">arXiv:2408.06300</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.06300">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Inverse designing metamaterials with programmable nonlinear functional responses in graph space </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maurizi%2C+M">Marco Maurizi</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+D">Derek Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yu-Tong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+D">Desheng Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Hahn%2C+D">David Hahn</a>, <a href="/search/cs?searchtype=author&amp;query=Oudich%2C+M">Mourad Oudich</a>, <a href="/search/cs?searchtype=author&amp;query=Satpati%2C+A">Anish Satpati</a>, <a href="/search/cs?searchtype=author&amp;query=Bauchy%2C+M">Mathieu Bauchy</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+W">Wei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Y">Yizhou Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Jing%2C+Y">Yun Jing</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X+R">Xiaoyu Rayne Zheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.06300v1-abstract-short" style="display: inline;"> Material responses to static and dynamic stimuli, represented as nonlinear curves, are design targets for engineering functionalities like structural support, impact protection, and acoustic and photonic bandgaps. Three-dimensional metamaterials offer significant tunability due to their internal structure, yet existing methods struggle to capture their complex behavior-to-structure relationships.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.06300v1-abstract-full').style.display = 'inline'; document.getElementById('2408.06300v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.06300v1-abstract-full" style="display: none;"> Material responses to static and dynamic stimuli, represented as nonlinear curves, are design targets for engineering functionalities like structural support, impact protection, and acoustic and photonic bandgaps. Three-dimensional metamaterials offer significant tunability due to their internal structure, yet existing methods struggle to capture their complex behavior-to-structure relationships. We present GraphMetaMat, a graph-based framework capable of designing three-dimensional metamaterials with programmable responses and arbitrary manufacturing constraints. Integrating graph networks, physics biases, reinforcement learning, and tree search, GraphMetaMat can target stress-strain curves spanning four orders of magnitude and complex behaviors, as well as viscoelastic transmission responses with varying attenuation gaps. GraphMetaMat can create cushioning materials for protective equipment and vibration-damping panels for electric vehicles, outperforming commercial materials, and enabling the automatic design of materials with on-demand functionalities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.06300v1-abstract-full').style.display = 'none'; document.getElementById('2408.06300v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.06019">arXiv:2408.06019</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.06019">pdf</a>, <a href="https://arxiv.org/format/2408.06019">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> HeadGAP: Few-shot 3D Head Avatar via Generalizable Gaussian Priors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiaozheng Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+C">Chao Wen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhaohu Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Weiyi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+Z">Zhuo Su</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+X">Xu Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yang Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Lv%2C+Z">Zheng Lv</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiaoyuan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yongjie Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+G">Guidong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+L">Lan Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.06019v1-abstract-short" style="display: inline;"> In this paper, we present a novel 3D head avatar creation approach capable of generalizing from few-shot in-the-wild data with high-fidelity and animatable robustness. Given the underconstrained nature of this problem, incorporating prior knowledge is essential. Therefore, we propose a framework comprising prior learning and avatar creation phases. The prior learning phase leverages 3D head priors&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.06019v1-abstract-full').style.display = 'inline'; document.getElementById('2408.06019v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.06019v1-abstract-full" style="display: none;"> In this paper, we present a novel 3D head avatar creation approach capable of generalizing from few-shot in-the-wild data with high-fidelity and animatable robustness. Given the underconstrained nature of this problem, incorporating prior knowledge is essential. Therefore, we propose a framework comprising prior learning and avatar creation phases. The prior learning phase leverages 3D head priors derived from a large-scale multi-view dynamic dataset, and the avatar creation phase applies these priors for few-shot personalization. Our approach effectively captures these priors by utilizing a Gaussian Splatting-based auto-decoder network with part-based dynamic modeling. Our method employs identity-shared encoding with personalized latent codes for individual identities to learn the attributes of Gaussian primitives. During the avatar creation phase, we achieve fast head avatar personalization by leveraging inversion and fine-tuning strategies. Extensive experiments demonstrate that our model effectively exploits head priors and successfully generalizes them to few-shot personalization, achieving photo-realistic rendering quality, multi-view consistency, and stable animation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.06019v1-abstract-full').style.display = 'none'; document.getElementById('2408.06019v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project page: https://headgap.github.io/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.05211">arXiv:2408.05211</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.05211">pdf</a>, <a href="https://arxiv.org/format/2408.05211">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> VITA: Towards Open-Source Interactive Omni Multimodal LLM </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Fu%2C+C">Chaoyou Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Haojia Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Long%2C+Z">Zuwei Long</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+Y">Yunhang Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+M">Meng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yifan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+S">Shaoqi Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xiong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+D">Di Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+L">Long Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiawu Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+R">Ran He</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+R">Rongrong Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yunsheng Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Shan%2C+C">Caifeng Shan</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xing Sun</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.05211v2-abstract-short" style="display: inline;"> The remarkable multimodal capabilities and interactive experience of GPT-4o underscore their necessity in practical applications, yet open-source models rarely excel in both areas. In this paper, we introduce VITA, the first-ever open-source Multimodal Large Language Model (MLLM) adept at simultaneous processing and analysis of Video, Image, Text, and Audio modalities, and meanwhile has an advance&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.05211v2-abstract-full').style.display = 'inline'; document.getElementById('2408.05211v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.05211v2-abstract-full" style="display: none;"> The remarkable multimodal capabilities and interactive experience of GPT-4o underscore their necessity in practical applications, yet open-source models rarely excel in both areas. In this paper, we introduce VITA, the first-ever open-source Multimodal Large Language Model (MLLM) adept at simultaneous processing and analysis of Video, Image, Text, and Audio modalities, and meanwhile has an advanced multimodal interactive experience. Starting from Mixtral 8x7B as a language foundation, we expand its Chinese vocabulary followed by bilingual instruction tuning. We further endow the language model with visual and audio capabilities through two-stage multi-task learning of multimodal alignment and instruction tuning. VITA demonstrates robust foundational capabilities of multilingual, vision, and audio understanding, as evidenced by its strong performance across a range of both unimodal and multimodal benchmarks. Beyond foundational capabilities, we have made considerable progress in enhancing the natural multimodal human-computer interaction experience. VITA is the first step for the open-source community to explore the seamless integration of multimodal understanding and interaction. While there is still lots of work to be done on VITA to get close to close-source counterparts, we hope that its role as a pioneer can serve as a cornerstone for subsequent research. Project Page: https://vita-home.github.io. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.05211v2-abstract-full').style.display = 'none'; document.getElementById('2408.05211v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project Page: https://vita-home.github.io</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.03124">arXiv:2408.03124</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.03124">pdf</a>, <a href="https://arxiv.org/format/2408.03124">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Closed-loop Diffusion Control of Complex Physical Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wei%2C+L">Long Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+H">Haodong Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yuchen Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+R">Ruiqi Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+P">Peiyan Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiang Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+T">Tao Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+D">Dixia Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+T">Tailin Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.03124v2-abstract-short" style="display: inline;"> The control problems of complex physical systems have broad applications in science and engineering. Previous studies have shown that generative control methods based on diffusion models offer significant advantages for solving these problems. However, existing generative control approaches face challenges in both performance and efficiency when extended to the closed-loop setting, which is essent&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.03124v2-abstract-full').style.display = 'inline'; document.getElementById('2408.03124v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.03124v2-abstract-full" style="display: none;"> The control problems of complex physical systems have broad applications in science and engineering. Previous studies have shown that generative control methods based on diffusion models offer significant advantages for solving these problems. However, existing generative control approaches face challenges in both performance and efficiency when extended to the closed-loop setting, which is essential for effective control. In this paper, we propose an efficient Closed-Loop Diffusion method for Physical systems Control (CL-DiffPhyCon). By employing an asynchronous denoising framework for different physical time steps, CL-DiffPhyCon generates control signals conditioned on real-time feedback from the environment with significantly reduced computational cost during sampling. Additionally, the control process could be further accelerated by incorporating fast sampling techniques, such as DDIM. We evaluate CL-DiffPhyCon on two tasks: 1D Burgers&#39; equation control and 2D incompressible fluid control. The results demonstrate that CL-DiffPhyCon achieves superior control performance with significant improvements in sampling efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.03124v2-abstract-full').style.display = 'none'; document.getElementById('2408.03124v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.01934">arXiv:2408.01934</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.01934">pdf</a>, <a href="https://arxiv.org/format/2408.01934">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Survey and Evaluation of Adversarial Attacks for Object Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+K+N+T">Khoi Nguyen Tiet Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Wenyu Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+K">Kangkang Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yuhuan Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xingjian Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+H+L">Hui Li Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhen%2C+L">Liangli Zhen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.01934v2-abstract-short" style="display: inline;"> Deep learning models excel in various computer vision tasks but are susceptible to adversarial examples-subtle perturbations in input data that lead to incorrect predictions. This vulnerability poses significant risks in safety-critical applications such as autonomous vehicles, security surveillance, and aircraft health monitoring. While numerous surveys focus on adversarial attacks in image class&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01934v2-abstract-full').style.display = 'inline'; document.getElementById('2408.01934v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.01934v2-abstract-full" style="display: none;"> Deep learning models excel in various computer vision tasks but are susceptible to adversarial examples-subtle perturbations in input data that lead to incorrect predictions. This vulnerability poses significant risks in safety-critical applications such as autonomous vehicles, security surveillance, and aircraft health monitoring. While numerous surveys focus on adversarial attacks in image classification, the literature on such attacks in object detection is limited. This paper offers a comprehensive taxonomy of adversarial attacks specific to object detection, reviews existing adversarial robustness evaluation metrics, and systematically assesses open-source attack methods and model robustness. Key observations are provided to enhance the understanding of attack effectiveness and corresponding countermeasures. Additionally, we identify crucial research challenges to guide future efforts in securing automated object detection systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01934v2-abstract-full').style.display = 'none'; document.getElementById('2408.01934v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.21075">arXiv:2407.21075</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.21075">pdf</a>, <a href="https://arxiv.org/format/2407.21075">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Apple Intelligence Foundation Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gunter%2C+T">Tom Gunter</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zirui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+R">Ruoming Pang</a>, <a href="/search/cs?searchtype=author&amp;query=Narayanan%2C+A">Andy Narayanan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+A">Aonan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+B">Bowen Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chiu%2C+C">Chung-Cheng Chiu</a>, <a href="/search/cs?searchtype=author&amp;query=Qiu%2C+D">David Qiu</a>, <a href="/search/cs?searchtype=author&amp;query=Gopinath%2C+D">Deepak Gopinath</a>, <a href="/search/cs?searchtype=author&amp;query=Yap%2C+D+A">Dian Ang Yap</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+D">Dong Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Nan%2C+F">Feng Nan</a>, <a href="/search/cs?searchtype=author&amp;query=Weers%2C+F">Floris Weers</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+G">Guoli Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Haoshuo Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Jianyu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+J">Jiarui Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Peebles%2C+J">John Peebles</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+K">Ke Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+M">Mark Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+N">Nan Du</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Qibin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Keunebroek%2C+Q">Quentin Keunebroek</a> , et al. (130 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.21075v1-abstract-short" style="display: inline;"> We present foundation language models developed to power Apple Intelligence features, including a ~3 billion parameter model designed to run efficiently on devices and a large server-based language model designed for Private Cloud Compute. These models are designed to perform a wide range of tasks efficiently, accurately, and responsibly. This report describes the model architecture, the data used&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21075v1-abstract-full').style.display = 'inline'; document.getElementById('2407.21075v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.21075v1-abstract-full" style="display: none;"> We present foundation language models developed to power Apple Intelligence features, including a ~3 billion parameter model designed to run efficiently on devices and a large server-based language model designed for Private Cloud Compute. These models are designed to perform a wide range of tasks efficiently, accurately, and responsibly. This report describes the model architecture, the data used to train the model, the training process, how the models are optimized for inference, and the evaluation results. We highlight our focus on Responsible AI and how the principles are applied throughout the model development. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21075v1-abstract-full').style.display = 'none'; document.getElementById('2407.21075v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Zheng%2C+X&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">&hellip;</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10