CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–50 of 622 results for author: <span class="mathjax">Zhu, C</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Zhu%2C+C">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Zhu, C"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zhu%2C+C&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zhu, C"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">…</span></li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.14292">arXiv:2411.14292</a> <span> [<a href="https://arxiv.org/pdf/2411.14292">pdf</a>, <a href="https://arxiv.org/format/2411.14292">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Hypothesis testing of symmetry in quantum dynamics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+Y">Yu-Ao Chen</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenghong Zhu</a>, <a href="/search/cs?searchtype=author&query=He%2C+K">Keming He</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Y">Yingjian Liu</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+X">Xin Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.14292v1-abstract-short" style="display: inline;"> Symmetry plays a crucial role in quantum physics, dictating the behavior and dynamics of physical systems. In this paper, We develop a hypothesis-testing framework for quantum dynamics symmetry using a limited number of queries to the unknown unitary operation and establish the quantum max-relative entropy lower bound for the type-II error. We construct optimal ancilla-free protocols that achieve… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14292v1-abstract-full').style.display = 'inline'; document.getElementById('2411.14292v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.14292v1-abstract-full" style="display: none;"> Symmetry plays a crucial role in quantum physics, dictating the behavior and dynamics of physical systems. In this paper, We develop a hypothesis-testing framework for quantum dynamics symmetry using a limited number of queries to the unknown unitary operation and establish the quantum max-relative entropy lower bound for the type-II error. We construct optimal ancilla-free protocols that achieve optimal type-II error probability for testing time-reversal symmetry (T-symmetry) and diagonal symmetry (Z-symmetry) with limited queries. Contrasting with the advantages of indefinite causal order strategies in various quantum information processing tasks, we show that parallel, adaptive, and indefinite causal order strategies have equal power for our tasks. We establish optimal protocols for T-symmetry testing and Z-symmetry testing for 6 and 5 queries, respectively, from which we infer that the type-II error exhibits a decay rate of $\mathcal{O}(m^{-2})$ with respect to the number of queries $m$. This represents a significant improvement over the basic repetition protocols without using global entanglement, where the error decays at a slower rate of $\mathcal{O}(m^{-1})$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.14292v1-abstract-full').style.display = 'none'; document.getElementById('2411.14292v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13909">arXiv:2411.13909</a> <span> [<a href="https://arxiv.org/pdf/2411.13909">pdf</a>, <a href="https://arxiv.org/format/2411.13909">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Panther: Illuminate the Sight of Multimodal LLMs with Instruction-Guided Visual Prompts </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+H">Honglin Li</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+Y">Yuting Gao</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenglu Zhu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+J">Jingdong Chen</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+M">Ming Yang</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+L">Lin Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13909v1-abstract-short" style="display: inline;"> Multimodal large language models (MLLMs) are closing the gap to human visual perception capability rapidly, while, still lag behind on attending to subtle images details or locating small objects precisely, etc. Common schemes to tackle these issues include deploying multiple vision encoders or operating on original high-resolution images. Few studies have concentrated on taking the textual instru… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13909v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13909v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13909v1-abstract-full" style="display: none;"> Multimodal large language models (MLLMs) are closing the gap to human visual perception capability rapidly, while, still lag behind on attending to subtle images details or locating small objects precisely, etc. Common schemes to tackle these issues include deploying multiple vision encoders or operating on original high-resolution images. Few studies have concentrated on taking the textual instruction into improving visual representation, resulting in losing focus in some vision-centric tasks, a phenomenon we herein termed as Amblyopia. In this work, we introduce Panther, a MLLM that closely adheres to user instruction and locates targets of interests precisely, with the finesse of a black panther. Specifically, Panther comprises three integral components: Panther-VE, Panther-Bridge, and Panther-Decoder. Panther-VE integrates user instruction information at the early stages of the vision encoder, thereby extracting the most relevant and useful visual representations. The Panther-Bridge module, equipped with powerful filtering capabilities, significantly reduces redundant visual information, leading to a substantial savings in training costs. The Panther-Decoder is versatile and can be employed with any decoder-only architecture of LLMs without discrimination. Experimental results, particularly on vision-centric benchmarks, have demonstrated the effectiveness of Panther. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13909v1-abstract-full').style.display = 'none'; document.getElementById('2411.13909v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11777">arXiv:2411.11777</a> <span> [<a href="https://arxiv.org/pdf/2411.11777">pdf</a>, <a href="https://arxiv.org/ps/2411.11777">ps</a>, <a href="https://arxiv.org/format/2411.11777">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Assistive Control of Knee Exoskeletons for Human Walking on Granular Terrains </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chunchu Zhu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+X">Xunjie Chen</a>, <a href="/search/cs?searchtype=author&query=Yi%2C+J">Jingang Yi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11777v1-abstract-short" style="display: inline;"> Human walkers traverse diverse environments and demonstrate different gait locomotion and energy cost on granular terrains compared to solid ground. We present a stiffness-based model predictive control approach of knee exoskeleton assistance on sand. The gait and locomotion comparison is first discussed for human walkers on sand and solid ground. A machine learning-based estimation scheme is then… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11777v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11777v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11777v1-abstract-full" style="display: none;"> Human walkers traverse diverse environments and demonstrate different gait locomotion and energy cost on granular terrains compared to solid ground. We present a stiffness-based model predictive control approach of knee exoskeleton assistance on sand. The gait and locomotion comparison is first discussed for human walkers on sand and solid ground. A machine learning-based estimation scheme is then presented to predict the ground reaction forces (GRFs) for human walkers on different terrains in real time. Built on the estimated GRFs and human joint torques, a knee exoskeleton controller is designed to provide assistive torque through a model predictive stiffness control scheme. We conduct indoor and outdoor experiments to validate the modeling and control design and their performance. The experiments demonstrate the major muscle activation and metabolic reductions by respectively 15% and 3.7% under the assistive exoskeleton control of human walking on sand. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11777v1-abstract-full').style.display = 'none'; document.getElementById('2411.11777v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Eight pages, eleven figures, submitted to IEEE Robotics and Automation Letters</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11410">arXiv:2411.11410</a> <span> [<a href="https://arxiv.org/pdf/2411.11410">pdf</a>, <a href="https://arxiv.org/format/2411.11410">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Detecting Multi-Parameter Constraint Inconsistencies in Python Data Science Libraries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Xu%2C+X">Xiufeng Xu</a>, <a href="/search/cs?searchtype=author&query=Xie%2C+F">Fuman Xie</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenguang Zhu</a>, <a href="/search/cs?searchtype=author&query=Bai%2C+G">Guangdong Bai</a>, <a href="/search/cs?searchtype=author&query=Khurshid%2C+S">Sarfraz Khurshid</a>, <a href="/search/cs?searchtype=author&query=Li%2C+Y">Yi Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11410v2-abstract-short" style="display: inline;"> Modern AI- and Data-intensive software systems rely heavily on data science and machine learning libraries that provide essential algorithmic implementations and computational frameworks. These libraries expose complex APIs whose correct usage has to follow constraints among multiple interdependent parameters. Developers using these APIs are expected to learn about the constraints through the prov… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11410v2-abstract-full').style.display = 'inline'; document.getElementById('2411.11410v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11410v2-abstract-full" style="display: none;"> Modern AI- and Data-intensive software systems rely heavily on data science and machine learning libraries that provide essential algorithmic implementations and computational frameworks. These libraries expose complex APIs whose correct usage has to follow constraints among multiple interdependent parameters. Developers using these APIs are expected to learn about the constraints through the provided documentations and any discrepancy may lead to unexpected behaviors. However, maintaining correct and consistent multi-parameter constraints in API documentations remains a significant challenge for API compatibility and reliability. To address this challenge, we propose MPDetector, for detecting inconsistencies between code and documentation, specifically focusing on multi-parameter constraints. MPDetector identifies these constraints at the code level by exploring execution paths through symbolic execution and further extracts corresponding constraints from documentation using large language models (LLMs). We propose a customized fuzzy constraint logic to reconcile the unpredictability of LLM outputs and detects logical inconsistencies between the code and documentation constraints. We collected and constructed two datasets from four popular data science libraries and evaluated MPDetector on them. The results demonstrate that MPDetector can effectively detect inconsistency issues with the precision of 92.8%. We further reported 14 detected inconsistency issues to the library developers, who have confirmed 11 issues at the time of writing. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11410v2-abstract-full').style.display = 'none'; document.getElementById('2411.11410v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07354">arXiv:2411.07354</a> <span> [<a href="https://arxiv.org/pdf/2411.07354">pdf</a>, <a href="https://arxiv.org/ps/2411.07354">ps</a>, <a href="https://arxiv.org/format/2411.07354">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> </div> </div> <p class="title is-5 mathjax"> Strategyproof Learning with Advice </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Balkanski%2C+E">Eric Balkanski</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Cherlin Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07354v1-abstract-short" style="display: inline;"> An important challenge in robust machine learning is when training data is provided by strategic sources who may intentionally report erroneous data for their own benefit. A line of work at the intersection of machine learning and mechanism design aims to deter strategic agents from reporting erroneous training data by designing learning algorithms that are strategyproof. Strategyproofness is a st… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07354v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07354v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07354v1-abstract-full" style="display: none;"> An important challenge in robust machine learning is when training data is provided by strategic sources who may intentionally report erroneous data for their own benefit. A line of work at the intersection of machine learning and mechanism design aims to deter strategic agents from reporting erroneous training data by designing learning algorithms that are strategyproof. Strategyproofness is a strong and desirable property, but it comes at a cost in the approximation ratio of even simple risk minimization problems. In this paper, we study strategyproof regression and classification problems in a model with advice. This model is part of a recent line on mechanism design with advice where the goal is to achieve both an improved approximation ratio when the advice is correct (consistency) and a bounded approximation ratio when the advice is incorrect (robustness). We provide the first non-trivial consistency-robustness tradeoffs for strategyproof regression and classification, which hold for simple yet interesting classes of functions. For classes of constant functions, we give a deterministic and strategyproof mechanism that is, for any $纬\in (0, 2]$, $1+纬$ consistent and $1 + 4/纬$ robust and provide a lower bound that shows that this tradeoff is optimal. We extend this mechanism and its guarantees to homogeneous linear regression over $\mathbb{R}$. In the binary classification problem of selecting from three or more labelings, we present strong impossibility results for both deterministic and randomized mechanism. Finally, we provide deterministic and randomized mechanisms for selecting from two labelings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07354v1-abstract-full').style.display = 'none'; document.getElementById('2411.07354v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.06665">arXiv:2411.06665</a> <span> [<a href="https://arxiv.org/pdf/2411.06665">pdf</a>, <a href="https://arxiv.org/format/2411.06665">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Learning from Different Samples: A Source-free Framework for Semi-supervised Domain Adaptation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xinyang Huang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chuang Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+B">Bowen Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+S">Shanghang Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.06665v1-abstract-short" style="display: inline;"> Semi-supervised domain adaptation (SSDA) has been widely studied due to its ability to utilize a few labeled target data to improve the generalization ability of the model. However, existing methods only consider designing certain strategies for target samples to adapt, ignoring the exploration of customized learning for different target samples. When the model encounters complex target distributi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06665v1-abstract-full').style.display = 'inline'; document.getElementById('2411.06665v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.06665v1-abstract-full" style="display: none;"> Semi-supervised domain adaptation (SSDA) has been widely studied due to its ability to utilize a few labeled target data to improve the generalization ability of the model. However, existing methods only consider designing certain strategies for target samples to adapt, ignoring the exploration of customized learning for different target samples. When the model encounters complex target distribution, existing methods will perform limited due to the inability to clearly and comprehensively learn the knowledge of multiple types of target samples. To fill this gap, this paper focuses on designing a framework to use different strategies for comprehensively mining different target samples. We propose a novel source-free framework (SOUF) to achieve semi-supervised fine-tuning of the source pre-trained model on the target domain. Different from existing SSDA methods, SOUF decouples SSDA from the perspectives of different target samples, specifically designing robust learning techniques for unlabeled, reliably labeled, and noisy pseudo-labeled target samples. For unlabeled target samples, probability-based weighted contrastive learning (PWC) helps the model learn more discriminative feature representations. To mine the latent knowledge of labeled target samples, reliability-based mixup contrastive learning (RMC) learns complex knowledge from the constructed reliable sample set. Finally, predictive regularization learning (PR) further mitigates the misleading effect of noisy pseudo-labeled samples on the model. Extensive experiments on benchmark datasets demonstrate the superiority of our framework over state-of-the-art methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.06665v1-abstract-full').style.display = 'none'; document.getElementById('2411.06665v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02979">arXiv:2411.02979</a> <span> [<a href="https://arxiv.org/pdf/2411.02979">pdf</a>, <a href="https://arxiv.org/format/2411.02979">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/s11704-024-40417-7">10.1007/s11704-024-40417-7 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> CAD-NeRF: Learning NeRFs from Uncalibrated Few-view Images by CAD Model Retrieval </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wen%2C+X">Xin Wen</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+X">Xuening Zhu</a>, <a href="/search/cs?searchtype=author&query=Yi%2C+R">Renjiao Yi</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Z">Zhifeng Wang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenyang Zhu</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+K">Kai Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02979v1-abstract-short" style="display: inline;"> Reconstructing from multi-view images is a longstanding problem in 3D vision, where neural radiance fields (NeRFs) have shown great potential and get realistic rendered images of novel views. Currently, most NeRF methods either require accurate camera poses or a large number of input images, or even both. Reconstructing NeRF from few-view images without poses is challenging and highly ill-posed. T… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02979v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02979v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02979v1-abstract-full" style="display: none;"> Reconstructing from multi-view images is a longstanding problem in 3D vision, where neural radiance fields (NeRFs) have shown great potential and get realistic rendered images of novel views. Currently, most NeRF methods either require accurate camera poses or a large number of input images, or even both. Reconstructing NeRF from few-view images without poses is challenging and highly ill-posed. To address this problem, we propose CAD-NeRF, a method reconstructed from less than 10 images without any known poses. Specifically, we build a mini library of several CAD models from ShapeNet and render them from many random views. Given sparse-view input images, we run a model and pose retrieval from the library, to get a model with similar shapes, serving as the density supervision and pose initializations. Here we propose a multi-view pose retrieval method to avoid pose conflicts among views, which is a new and unseen problem in uncalibrated NeRF methods. Then, the geometry of the object is trained by the CAD guidance. The deformation of the density field and camera poses are optimized jointly. Then texture and density are trained and fine-tuned as well. All training phases are in self-supervised manners. Comprehensive evaluations of synthetic and real images show that CAD-NeRF successfully learns accurate densities with a large deformation from retrieved CAD models, showing the generalization abilities. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02979v1-abstract-full').style.display = 'none'; document.getElementById('2411.02979v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The article has been accepted by Frontiers of Computer Science (FCS)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.01789">arXiv:2411.01789</a> <span> [<a href="https://arxiv.org/pdf/2411.01789">pdf</a>, <a href="https://arxiv.org/format/2411.01789">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Generating executable oracles to check conformance of client code to requirements of JDK Javadocs using LLMs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Jiang%2C+S">Shan Jiang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenguang Zhu</a>, <a href="/search/cs?searchtype=author&query=Khurshid%2C+S">Sarfraz Khurshid</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.01789v1-abstract-short" style="display: inline;"> Software testing remains the most widely used methodology for validating quality of code. However, effectiveness of testing critically depends on the quality of test suites used. Test cases in a test suite consist of two fundamental parts: (1) input values for the code under test, and (2) correct checks for the outputs it produces. These checks are commonly written as assertions, and termed test o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01789v1-abstract-full').style.display = 'inline'; document.getElementById('2411.01789v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.01789v1-abstract-full" style="display: none;"> Software testing remains the most widely used methodology for validating quality of code. However, effectiveness of testing critically depends on the quality of test suites used. Test cases in a test suite consist of two fundamental parts: (1) input values for the code under test, and (2) correct checks for the outputs it produces. These checks are commonly written as assertions, and termed test oracles. The last couple of decades have seen much progress in automated test input generation, e.g., using fuzzing and symbolic execution. However, automating test oracles remains a relatively less explored problem area. Indeed, a test oracle by its nature requires knowledge of expected behavior, which may only be known to the developer and may not not exist in a formal language that supports automated reasoning. Our focus in this paper is automation of test oracles for clients of widely used Java libraries, e.g., java.lang and java.util packages. Our key insight is that Javadocs that provide a rich source of information can enable automated generation of test oracles. Javadocs of the core Java libraries are fairly detailed documents that contain natural language descriptions of not only how the libraries behave but also how the clients must (not) use them. We use large language models as an enabling technology to embody our insight into a framework for test oracle automation, and evaluate it experimentally. Our experiments demonstrate that LLMs can generate oracles for checking normal and exceptional behaviors from Javadocs, with 98.8% of these oracles being compilable and 96.4% accurately reflecting intended properties. Even for the few incorrect oracles, errors are minor and can be easily corrected with the help of additional comment information generated by the LLMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01789v1-abstract-full').style.display = 'none'; document.getElementById('2411.01789v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.22746">arXiv:2410.22746</a> <span> [<a href="https://arxiv.org/pdf/2410.22746">pdf</a>, <a href="https://arxiv.org/format/2410.22746">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Unauthorized UAV Countermeasure for Low-Altitude Economy: Joint Communications and Jamming based on MIMO Cellular Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+Z">Zhuoran Li</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+Z">Zhen Gao</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+K">Kuiyu Wang</a>, <a href="/search/cs?searchtype=author&query=Mei%2C+Y">Yikun Mei</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chunli Zhu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+L">Lei Chen</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+X">Xiaomei Wu</a>, <a href="/search/cs?searchtype=author&query=Niyato%2C+D">Dusit Niyato</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.22746v2-abstract-short" style="display: inline;"> To ensure the thriving development of low-altitude economy, countering unauthorized unmanned aerial vehicles (UAVs) is an essential task. The existing widely deployed base stations hold great potential for joint communication and jamming. In light of this, this paper investigates the joint design of beamforming to simultaneously support communication with legitimate users and countermeasure agains… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22746v2-abstract-full').style.display = 'inline'; document.getElementById('2410.22746v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.22746v2-abstract-full" style="display: none;"> To ensure the thriving development of low-altitude economy, countering unauthorized unmanned aerial vehicles (UAVs) is an essential task. The existing widely deployed base stations hold great potential for joint communication and jamming. In light of this, this paper investigates the joint design of beamforming to simultaneously support communication with legitimate users and countermeasure against unauthorized UAVs based on dual-functional multiple-input multiple-output (MIMO) cellular systems. We first formulate a joint communication and jamming (JCJ) problem, relaxing it through semi-definite relaxation (SDR) to obtain a tractable semi-definite programming (SDP) problem, with SDR providing an essential step toward simplifying the complex JCJ design. Although the solution to the relaxed SDP problem cannot directly solve the original problem, it offers valuable insights for further refinement. Therefore, we design a novel constraint specifically tailored to the structure of the SDP problem, ensuring that the solution adheres to the rank-1 constraint of the original problem. Finally, we validate effectiveness of the proposed JCJ scheme through extensive simulations. Simulation codes are provided to reproduce the results in this paper: https://github.com/LiZhuoRan0. The results confirm that the proposed JCJ scheme can operate effectively when the total number of legitimate users and unauthorized UAVs exceeds the number of antennas. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22746v2-abstract-full').style.display = 'none'; document.getElementById('2410.22746v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The paper has been accepted by IEEE IoTJ, and the code is available for result reproduction</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.18983">arXiv:2410.18983</a> <span> [<a href="https://arxiv.org/pdf/2410.18983">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Smart Navigation System for Parking Assignment at Large Events: Incorporating Heterogeneous Driver Characteristics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Cheng%2C+X">Xi Cheng</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+T">Tong Liu</a>, <a href="/search/cs?searchtype=author&query=Su%2C+G">Gaofeng Su</a>, <a href="/search/cs?searchtype=author&query=Che%2C+C">Chang Che</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+K">Ke Liu</a>, <a href="/search/cs?searchtype=author&query=Cai%2C+B">Binze Cai</a>, <a href="/search/cs?searchtype=author&query=Hu%2C+X">Xin Hu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.18983v1-abstract-short" style="display: inline;"> Parking challenges escalate significantly during large events such as concerts and sports games, yet few studies address dynamic parking lot assignments in these occasions. This paper introduces a smart navigation system designed to optimize parking assignments efficiently during major events, employing a mixed search algorithm that considers diverse drivers characteristics. We validated our syste… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18983v1-abstract-full').style.display = 'inline'; document.getElementById('2410.18983v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.18983v1-abstract-full" style="display: none;"> Parking challenges escalate significantly during large events such as concerts and sports games, yet few studies address dynamic parking lot assignments in these occasions. This paper introduces a smart navigation system designed to optimize parking assignments efficiently during major events, employing a mixed search algorithm that considers diverse drivers characteristics. We validated our system through simulations conducted in Berkeley, CA during the "Big Game" showcasing the advantages of our novel parking assignment approach. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18983v1-abstract-full').style.display = 'none'; document.getElementById('2410.18983v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: text overlap with arXiv:2406.05135</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.17434">arXiv:2410.17434</a> <span> [<a href="https://arxiv.org/pdf/2410.17434">pdf</a>, <a href="https://arxiv.org/format/2410.17434">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LongVU: Spatiotemporal Adaptive Compression for Long Video-Language Understanding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shen%2C+X">Xiaoqian Shen</a>, <a href="/search/cs?searchtype=author&query=Xiong%2C+Y">Yunyang Xiong</a>, <a href="/search/cs?searchtype=author&query=Zhao%2C+C">Changsheng Zhao</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+L">Lemeng Wu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+J">Jun Chen</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenchen Zhu</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Z">Zechun Liu</a>, <a href="/search/cs?searchtype=author&query=Xiao%2C+F">Fanyi Xiao</a>, <a href="/search/cs?searchtype=author&query=Varadarajan%2C+B">Balakrishnan Varadarajan</a>, <a href="/search/cs?searchtype=author&query=Bordes%2C+F">Florian Bordes</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Z">Zhuang Liu</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+H">Hu Xu</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+H+J">Hyunwoo J. Kim</a>, <a href="/search/cs?searchtype=author&query=Soran%2C+B">Bilge Soran</a>, <a href="/search/cs?searchtype=author&query=Krishnamoorthi%2C+R">Raghuraman Krishnamoorthi</a>, <a href="/search/cs?searchtype=author&query=Elhoseiny%2C+M">Mohamed Elhoseiny</a>, <a href="/search/cs?searchtype=author&query=Chandra%2C+V">Vikas Chandra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.17434v1-abstract-short" style="display: inline;"> Multimodal Large Language Models (MLLMs) have shown promising progress in understanding and analyzing video content. However, processing long videos remains a significant challenge constrained by LLM's context size. To address this limitation, we propose LongVU, a spatiotemporal adaptive compression mechanism thats reduces the number of video tokens while preserving visual details of long videos.… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.17434v1-abstract-full').style.display = 'inline'; document.getElementById('2410.17434v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.17434v1-abstract-full" style="display: none;"> Multimodal Large Language Models (MLLMs) have shown promising progress in understanding and analyzing video content. However, processing long videos remains a significant challenge constrained by LLM's context size. To address this limitation, we propose LongVU, a spatiotemporal adaptive compression mechanism thats reduces the number of video tokens while preserving visual details of long videos. Our idea is based on leveraging cross-modal query and inter-frame dependencies to adaptively reduce temporal and spatial redundancy in videos. Specifically, we leverage DINOv2 features to remove redundant frames that exhibit high similarity. Then we utilize text-guided cross-modal query for selective frame feature reduction. Further, we perform spatial token reduction across frames based on their temporal dependencies. Our adaptive compression strategy effectively processes a large number of frames with little visual information loss within given context length. Our LongVU consistently surpass existing methods across a variety of video understanding benchmarks, especially on hour-long video understanding tasks such as VideoMME and MLVU. Given a light-weight LLM, our LongVU also scales effectively into a smaller size with state-of-the-art video understanding performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.17434v1-abstract-full').style.display = 'none'; document.getElementById('2410.17434v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project page: https://vision-cair.github.io/LongVU</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.16953">arXiv:2410.16953</a> <span> [<a href="https://arxiv.org/pdf/2410.16953">pdf</a>, <a href="https://arxiv.org/format/2410.16953">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Towards Real Zero-Shot Camouflaged Object Segmentation without Camouflaged Annotations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lei%2C+C">Cheng Lei</a>, <a href="/search/cs?searchtype=author&query=Fan%2C+J">Jie Fan</a>, <a href="/search/cs?searchtype=author&query=Li%2C+X">Xinran Li</a>, <a href="/search/cs?searchtype=author&query=Xiang%2C+T">Tianzhu Xiang</a>, <a href="/search/cs?searchtype=author&query=Li%2C+A">Ao Li</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Ce Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+L">Le Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.16953v1-abstract-short" style="display: inline;"> Camouflaged Object Segmentation (COS) faces significant challenges due to the scarcity of annotated data, where meticulous pixel-level annotation is both labor-intensive and costly, primarily due to the intricate object-background boundaries. Addressing the core question, "Can COS be effectively achieved in a zero-shot manner without manual annotations for any camouflaged object?" we affirmatively… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.16953v1-abstract-full').style.display = 'inline'; document.getElementById('2410.16953v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.16953v1-abstract-full" style="display: none;"> Camouflaged Object Segmentation (COS) faces significant challenges due to the scarcity of annotated data, where meticulous pixel-level annotation is both labor-intensive and costly, primarily due to the intricate object-background boundaries. Addressing the core question, "Can COS be effectively achieved in a zero-shot manner without manual annotations for any camouflaged object?" we affirmatively respond and introduce a robust zero-shot COS framework. This framework leverages the inherent local pattern bias of COS and employs a broad semantic feature space derived from salient object segmentation (SOS) for efficient zero-shot transfer. We incorporate an Masked Image Modeling (MIM) based image encoder optimized for Parameter-Efficient Fine-Tuning (PEFT), a Multimodal Large Language Model (M-LLM), and a Multi-scale Fine-grained Alignment (MFA) mechanism. The MIM pre-trained image encoder focuses on capturing essential low-level features, while the M-LLM generates caption embeddings processed alongside these visual cues. These embeddings are precisely aligned using MFA, enabling our framework to accurately interpret and navigate complex semantic contexts. To optimize operational efficiency, we introduce a learnable codebook that represents the M-LLM during inference, significantly reducing computational overhead. Our framework demonstrates its versatility and efficacy through rigorous experimentation, achieving state-of-the-art performance in zero-shot COS with $F_尾^w$ scores of 72.9\% on CAMO and 71.7\% on COD10K. By removing the M-LLM during inference, we achieve an inference speed comparable to that of traditional end-to-end models, reaching 18.1 FPS. Code: https://github.com/R-LEI360725/ZSCOS-CaMF <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.16953v1-abstract-full').style.display = 'none'; document.getElementById('2410.16953v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15916">arXiv:2410.15916</a> <span> [<a href="https://arxiv.org/pdf/2410.15916">pdf</a>, <a href="https://arxiv.org/format/2410.15916">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Leveraging CORAL-Correlation Consistency Network for Semi-Supervised Left Atrium MRI Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+X">Xinze Li</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+R">Runlin Huang</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Z">Zhenghao Wu</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+B">Bohan Yang</a>, <a href="/search/cs?searchtype=author&query=Fan%2C+W">Wentao Fan</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chengzhang Zhu</a>, <a href="/search/cs?searchtype=author&query=Su%2C+W">Weifeng Su</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15916v1-abstract-short" style="display: inline;"> Semi-supervised learning (SSL) has been widely used to learn from both a few labeled images and many unlabeled images to overcome the scarcity of labeled samples in medical image segmentation. Most current SSL-based segmentation methods use pixel values directly to identify similar features in labeled and unlabeled data. They usually fail to accurately capture the intricate attachment structures i… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15916v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15916v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15916v1-abstract-full" style="display: none;"> Semi-supervised learning (SSL) has been widely used to learn from both a few labeled images and many unlabeled images to overcome the scarcity of labeled samples in medical image segmentation. Most current SSL-based segmentation methods use pixel values directly to identify similar features in labeled and unlabeled data. They usually fail to accurately capture the intricate attachment structures in the left atrium, such as the areas of inconsistent density or exhibit outward curvatures, adding to the complexity of the task. In this paper, we delve into this issue and introduce an effective solution, CORAL(Correlation-Aligned)-Correlation Consistency Network (CORN), to capture the global structure shape and local details of Left Atrium. Diverging from previous methods focused on each local pixel value, the CORAL-Correlation Consistency Module (CCM) in the CORN leverages second-order statistical information to capture global structural features by minimizing the distribution discrepancy between labeled and unlabeled samples in feature space. Yet, direct construction of features from unlabeled data frequently results in ``Sample Selection Bias'', leading to flawed supervision. We thus further propose the Dynamic Feature Pool (DFP) for the CCM, which utilizes a confidence-based filtering strategy to remove incorrectly selected features and regularize both teacher and student models by constraining the similarity matrix to be consistent. Extensive experiments on the Left Atrium dataset have shown that the proposed CORN outperforms previous state-of-the-art semi-supervised learning methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15916v1-abstract-full').style.display = 'none'; document.getElementById('2410.15916v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 3 figures, Accepted by 2024 IEEE International Conference on Bioinformatics and Biomedicine (BIBM 2024)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.4.6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15553">arXiv:2410.15553</a> <span> [<a href="https://arxiv.org/pdf/2410.15553">pdf</a>, <a href="https://arxiv.org/format/2410.15553">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Multi-IF: Benchmarking LLMs on Multi-Turn and Multilingual Instructions Following </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=He%2C+Y">Yun He</a>, <a href="/search/cs?searchtype=author&query=Jin%2C+D">Di Jin</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+C">Chaoqi Wang</a>, <a href="/search/cs?searchtype=author&query=Bi%2C+C">Chloe Bi</a>, <a href="/search/cs?searchtype=author&query=Mandyam%2C+K">Karishma Mandyam</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+H">Hejia Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Li%2C+N">Ning Li</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+T">Tengyu Xu</a>, <a href="/search/cs?searchtype=author&query=Lv%2C+H">Hongjiang Lv</a>, <a href="/search/cs?searchtype=author&query=Bhosale%2C+S">Shruti Bhosale</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenguang Zhu</a>, <a href="/search/cs?searchtype=author&query=Sankararaman%2C+K+A">Karthik Abinav Sankararaman</a>, <a href="/search/cs?searchtype=author&query=Helenowski%2C+E">Eryk Helenowski</a>, <a href="/search/cs?searchtype=author&query=Kambadur%2C+M">Melanie Kambadur</a>, <a href="/search/cs?searchtype=author&query=Tayade%2C+A">Aditya Tayade</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+H">Hao Ma</a>, <a href="/search/cs?searchtype=author&query=Fang%2C+H">Han Fang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+S">Sinong Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15553v2-abstract-short" style="display: inline;"> Large Language Models (LLMs) have demonstrated impressive capabilities in various tasks, including instruction following, which is crucial for aligning model outputs with user expectations. However, evaluating LLMs' ability to follow instructions remains challenging due to the complexity and subjectivity of human language. Current benchmarks primarily focus on single-turn, monolingual instructions… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15553v2-abstract-full').style.display = 'inline'; document.getElementById('2410.15553v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15553v2-abstract-full" style="display: none;"> Large Language Models (LLMs) have demonstrated impressive capabilities in various tasks, including instruction following, which is crucial for aligning model outputs with user expectations. However, evaluating LLMs' ability to follow instructions remains challenging due to the complexity and subjectivity of human language. Current benchmarks primarily focus on single-turn, monolingual instructions, which do not adequately reflect the complexities of real-world applications that require handling multi-turn and multilingual interactions. To address this gap, we introduce Multi-IF, a new benchmark designed to assess LLMs' proficiency in following multi-turn and multilingual instructions. Multi-IF, which utilizes a hybrid framework combining LLM and human annotators, expands upon the IFEval by incorporating multi-turn sequences and translating the English prompts into another 7 languages, resulting in a dataset of 4,501 multilingual conversations, where each has three turns. Our evaluation of 14 state-of-the-art LLMs on Multi-IF reveals that it presents a significantly more challenging task than existing benchmarks. All the models tested showed a higher rate of failure in executing instructions correctly with each additional turn. For example, o1-preview drops from 0.877 at the first turn to 0.707 at the third turn in terms of average accuracy over all languages. Moreover, languages with non-Latin scripts (Hindi, Russian, and Chinese) generally exhibit higher error rates, suggesting potential limitations in the models' multilingual capabilities. We release Multi-IF prompts and the evaluation code base to encourage further research in this critical area. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15553v2-abstract-full').style.display = 'none'; document.getElementById('2410.15553v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.15046">arXiv:2410.15046</a> <span> [<a href="https://arxiv.org/pdf/2410.15046">pdf</a>, <a href="https://arxiv.org/format/2410.15046">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Towards Truss-Based Temporal Community Search </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yang%2C+H">Huihui Yang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chunxue Zhu</a>, <a href="/search/cs?searchtype=author&query=Lin%2C+L">Longlong Lin</a>, <a href="/search/cs?searchtype=author&query=Yuan%2C+P">Pingpeng Yuan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.15046v1-abstract-short" style="display: inline;"> Identifying communities from temporal networks facilitates the understanding of potential dynamic relationships among entities, which has already received extensive applications. However, existing methods primarily rely on lower-order connectivity (e.g., temporal edges) to capture the structural and temporal cohesiveness of the community, often neglecting higher-order temporal connectivity, which… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15046v1-abstract-full').style.display = 'inline'; document.getElementById('2410.15046v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.15046v1-abstract-full" style="display: none;"> Identifying communities from temporal networks facilitates the understanding of potential dynamic relationships among entities, which has already received extensive applications. However, existing methods primarily rely on lower-order connectivity (e.g., temporal edges) to capture the structural and temporal cohesiveness of the community, often neglecting higher-order temporal connectivity, which leads to sub-optimal results. To overcome this dilemma, we propose a novel temporal community model named maximal-truss (MDT). This model emphasizes maximal temporal support, ensuring all edges are connected by a sequence of triangles with elegant temporal properties. To search the MDT containing the user-initiated query node q (q-MDT), we first design a powerful local search framework with some effective pruning strategies. This approach aims to identify the solution from the small temporal subgraph which is expanded from q. To further improve the performance on large graphs, we build the temporal trussness index (TT-index) for all edges. Leveraging the TT-index allows us to efficiently search high-probability target subgraphs instead of performing a full search across the entire input graph. Empirical results on nine real-world networks and seven competitors demonstrate the superiority of our solutions in terms of efficiency, effectiveness, and scalability <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.15046v1-abstract-full').style.display = 'none'; document.getElementById('2410.15046v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14940">arXiv:2410.14940</a> <span> [<a href="https://arxiv.org/pdf/2410.14940">pdf</a>, <a href="https://arxiv.org/format/2410.14940">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Nova: A Practical and Advanced Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lin%2C+M">Mingan Lin</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+F">Fan Yang</a>, <a href="/search/cs?searchtype=author&query=Shen%2C+Y">Yanjun Shen</a>, <a href="/search/cs?searchtype=author&query=Sun%2C+H">Haoze Sun</a>, <a href="/search/cs?searchtype=author&query=Li%2C+T">Tianpeng Li</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+T">Tao Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenzheng Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+T">Tao Zhang</a>, <a href="/search/cs?searchtype=author&query=Zheng%2C+M">Miao Zheng</a>, <a href="/search/cs?searchtype=author&query=Li%2C+X">Xu Li</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+Y">Yijie Zhou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+M">Mingyang Chen</a>, <a href="/search/cs?searchtype=author&query=Qin%2C+Y">Yanzhao Qin</a>, <a href="/search/cs?searchtype=author&query=Li%2C+Y">Youquan Li</a>, <a href="/search/cs?searchtype=author&query=Liang%2C+H">Hao Liang</a>, <a href="/search/cs?searchtype=author&query=Li%2C+F">Fei Li</a>, <a href="/search/cs?searchtype=author&query=Li%2C+Y">Yadong Li</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+M">Mang Wang</a>, <a href="/search/cs?searchtype=author&query=Dong%2C+G">Guosheng Dong</a>, <a href="/search/cs?searchtype=author&query=Fang%2C+K">Kun Fang</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+J">Jianhua Xu</a>, <a href="/search/cs?searchtype=author&query=Cui%2C+B">Bin Cui</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+W">Wentao Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+Z">Zenan Zhou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+W">Weipeng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14940v3-abstract-short" style="display: inline;"> We introduce Nova, a suite of practical alignment techniques employed in a series of empirically validated high-performing models. This represents the first comprehensive account of alignment methodologies, offering valuable insights for advancing AI research. We investigate the critical components that enhance model performance during the alignment process, including optimization methods, data st… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14940v3-abstract-full').style.display = 'inline'; document.getElementById('2410.14940v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14940v3-abstract-full" style="display: none;"> We introduce Nova, a suite of practical alignment techniques employed in a series of empirically validated high-performing models. This represents the first comprehensive account of alignment methodologies, offering valuable insights for advancing AI research. We investigate the critical components that enhance model performance during the alignment process, including optimization methods, data strategies, capability enhancements, and evaluation processes. The process spans three key stages: Prompt Augmentation System(PAS), Supervised Fine-Tuning(SFT), and Preference Alignment. The problems encountered, the solutions applied, and the improvements made are thoroughly recorded. Through comparisons across well-established benchmarks, we highlight the technological advancements enabled by Nova Alignment. Importantly, Qwen2-Nova-72B and Llama3-PBM-Nova-70B are instruct versions of the Qwen2-72B and Llama-3-70B base models, optimized through Nova. The Nova models show significant core improvements, with user experience gains of 17% to 28%, and excels on specialized benchmarks. In open-source benchmark evaluations, both Qwen2-Nova-72B and Llama3-PBM-Nova-70B consistently outperform their respective official instruct versions across nearly all datasets. This report aims to clarify the key technologies behind the alignment process, fostering a deeper understanding within the community. Llama3-PBM-Nova-70B model is available at https://huggingface.co/PKU-Baichuan-MLSystemLab/Llama3-PBM-Nova-70B. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14940v3-abstract-full').style.display = 'none'; document.getElementById('2410.14940v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14195">arXiv:2410.14195</a> <span> [<a href="https://arxiv.org/pdf/2410.14195">pdf</a>, <a href="https://arxiv.org/format/2410.14195">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Rethinking Transformer for Long Contextual Histopathology Whole Slide Image Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+H">Honglin Li</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Y">Yunlong Zhang</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+P">Pingyi Chen</a>, <a href="/search/cs?searchtype=author&query=Shui%2C+Z">Zhongyi Shui</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenglu Zhu</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+L">Lin Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14195v1-abstract-short" style="display: inline;"> Histopathology Whole Slide Image (WSI) analysis serves as the gold standard for clinical cancer diagnosis in the daily routines of doctors. To develop computer-aided diagnosis model for WSIs, previous methods typically employ Multi-Instance Learning to enable slide-level prediction given only slide-level labels. Among these models, vanilla attention mechanisms without pairwise interactions have tr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14195v1-abstract-full').style.display = 'inline'; document.getElementById('2410.14195v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14195v1-abstract-full" style="display: none;"> Histopathology Whole Slide Image (WSI) analysis serves as the gold standard for clinical cancer diagnosis in the daily routines of doctors. To develop computer-aided diagnosis model for WSIs, previous methods typically employ Multi-Instance Learning to enable slide-level prediction given only slide-level labels. Among these models, vanilla attention mechanisms without pairwise interactions have traditionally been employed but are unable to model contextual information. More recently, self-attention models have been utilized to address this issue. To alleviate the computational complexity of long sequences in large WSIs, methods like HIPT use region-slicing, and TransMIL employs approximation of full self-attention. Both approaches suffer from suboptimal performance due to the loss of key information. Moreover, their use of absolute positional embedding struggles to effectively handle long contextual dependencies in shape-varying WSIs. In this paper, we first analyze how the low-rank nature of the long-sequence attention matrix constrains the representation ability of WSI modelling. Then, we demonstrate that the rank of attention matrix can be improved by focusing on local interactions via a local attention mask. Our analysis shows that the local mask aligns with the attention patterns in the lower layers of the Transformer. Furthermore, the local attention mask can be implemented during chunked attention calculation, reducing the quadratic computational complexity to linear with a small local bandwidth. Building on this, we propose a local-global hybrid Transformer for both computational acceleration and local-global information interactions modelling. Our method, Long-contextual MIL (LongMIL), is evaluated through extensive experiments on various WSI tasks to validate its superiority. Our code will be available at github.com/invoker-LL/Long-MIL. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14195v1-abstract-full').style.display = 'none'; document.getElementById('2410.14195v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS-2024. arXiv admin note: text overlap with arXiv:2311.12885</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.14105">arXiv:2410.14105</a> <span> [<a href="https://arxiv.org/pdf/2410.14105">pdf</a>, <a href="https://arxiv.org/format/2410.14105">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DMGNN: Detecting and Mitigating Backdoor Attacks in Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sui%2C+H">Hao Sui</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+B">Bing Chen</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+J">Jiale Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chengcheng Zhu</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+D">Di Wu</a>, <a href="/search/cs?searchtype=author&query=Lu%2C+Q">Qinghua Lu</a>, <a href="/search/cs?searchtype=author&query=Long%2C+G">Guodong Long</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.14105v1-abstract-short" style="display: inline;"> Recent studies have revealed that GNNs are highly susceptible to multiple adversarial attacks. Among these, graph backdoor attacks pose one of the most prominent threats, where attackers cause models to misclassify by learning the backdoored features with injected triggers and modified target labels during the training phase. Based on the features of the triggers, these attacks can be categorized… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14105v1-abstract-full').style.display = 'inline'; document.getElementById('2410.14105v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.14105v1-abstract-full" style="display: none;"> Recent studies have revealed that GNNs are highly susceptible to multiple adversarial attacks. Among these, graph backdoor attacks pose one of the most prominent threats, where attackers cause models to misclassify by learning the backdoored features with injected triggers and modified target labels during the training phase. Based on the features of the triggers, these attacks can be categorized into out-of-distribution (OOD) and in-distribution (ID) graph backdoor attacks, triggers with notable differences from the clean sample feature distributions constitute OOD backdoor attacks, whereas the triggers in ID backdoor attacks are nearly identical to the clean sample feature distributions. Existing methods can successfully defend against OOD backdoor attacks by comparing the feature distribution of triggers and clean samples but fail to mitigate stealthy ID backdoor attacks. Due to the lack of proper supervision signals, the main task accuracy is negatively affected in defending against ID backdoor attacks. To bridge this gap, we propose DMGNN against OOD and ID graph backdoor attacks that can powerfully eliminate stealthiness to guarantee defense effectiveness and improve the model performance. Specifically, DMGNN can easily identify the hidden ID and OOD triggers via predicting label transitions based on counterfactual explanation. To further filter the diversity of generated explainable graphs and erase the influence of the trigger features, we present a reverse sampling pruning method to screen and discard the triggers directly on the data level. Extensive experimental evaluations on open graph datasets demonstrate that DMGNN far outperforms the state-of-the-art (SOTA) defense methods, reducing the attack success rate to 5% with almost negligible degradation in model performance (within 3.5%). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.14105v1-abstract-full').style.display = 'none'; document.getElementById('2410.14105v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.12138">arXiv:2410.12138</a> <span> [<a href="https://arxiv.org/pdf/2410.12138">pdf</a>, <a href="https://arxiv.org/format/2410.12138">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Preference Optimization with Multi-Sample Comparisons </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wang%2C+C">Chaoqi Wang</a>, <a href="/search/cs?searchtype=author&query=Zhao%2C+Z">Zhuokai Zhao</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Sankararaman%2C+K+A">Karthik Abinav Sankararaman</a>, <a href="/search/cs?searchtype=author&query=Valko%2C+M">Michal Valko</a>, <a href="/search/cs?searchtype=author&query=Cao%2C+X">Xuefei Cao</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+Z">Zhaorun Chen</a>, <a href="/search/cs?searchtype=author&query=Khabsa%2C+M">Madian Khabsa</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+Y">Yuxin Chen</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+H">Hao Ma</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+S">Sinong Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.12138v1-abstract-short" style="display: inline;"> Recent advancements in generative models, particularly large language models (LLMs) and diffusion models, have been driven by extensive pretraining on large datasets followed by post-training. However, current post-training methods such as reinforcement learning from human feedback (RLHF) and direct alignment from preference methods (DAP) primarily utilize single-sample comparisons. These approach… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12138v1-abstract-full').style.display = 'inline'; document.getElementById('2410.12138v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.12138v1-abstract-full" style="display: none;"> Recent advancements in generative models, particularly large language models (LLMs) and diffusion models, have been driven by extensive pretraining on large datasets followed by post-training. However, current post-training methods such as reinforcement learning from human feedback (RLHF) and direct alignment from preference methods (DAP) primarily utilize single-sample comparisons. These approaches often fail to capture critical characteristics such as generative diversity and bias, which are more accurately assessed through multiple samples. To address these limitations, we introduce a novel approach that extends post-training to include multi-sample comparisons. To achieve this, we propose Multi-sample Direct Preference Optimization (mDPO) and Multi-sample Identity Preference Optimization (mIPO). These methods improve traditional DAP methods by focusing on group-wise characteristics. Empirically, we demonstrate that multi-sample comparison is more effective in optimizing collective characteristics~(e.g., diversity and bias) for generative models than single-sample comparison. Additionally, our findings suggest that multi-sample comparisons provide a more robust optimization framework, particularly for dataset with label noise. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12138v1-abstract-full').style.display = 'none'; document.getElementById('2410.12138v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">preprint</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.07671">arXiv:2410.07671</a> <span> [<a href="https://arxiv.org/pdf/2410.07671">pdf</a>, <a href="https://arxiv.org/format/2410.07671">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> DISCO: A Hierarchical Disentangled Cognitive Diagnosis Framework for Interpretable Job Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yu%2C+X">Xiaoshan Yu</a>, <a href="/search/cs?searchtype=author&query=Qin%2C+C">Chuan Qin</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Q">Qi Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+H">Haiping Ma</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+X">Xingyi Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+H">Hengshu Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.07671v2-abstract-short" style="display: inline;"> The rapid development of online recruitment platforms has created unprecedented opportunities for job seekers while concurrently posing the significant challenge of quickly and accurately pinpointing positions that align with their skills and preferences. Job recommendation systems have significantly alleviated the extensive search burden for job seekers by optimizing user engagement metrics, such… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.07671v2-abstract-full').style.display = 'inline'; document.getElementById('2410.07671v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.07671v2-abstract-full" style="display: none;"> The rapid development of online recruitment platforms has created unprecedented opportunities for job seekers while concurrently posing the significant challenge of quickly and accurately pinpointing positions that align with their skills and preferences. Job recommendation systems have significantly alleviated the extensive search burden for job seekers by optimizing user engagement metrics, such as clicks and applications, thus achieving notable success. In recent years, a substantial amount of research has been devoted to developing effective job recommendation models, primarily focusing on text-matching based and behavior modeling based methods. While these approaches have realized impressive outcomes, it is imperative to note that research on the explainability of recruitment recommendations remains profoundly unexplored. To this end, in this paper, we propose DISCO, a hierarchical Disentanglement based Cognitive diagnosis framework, aimed at flexibly accommodating the underlying representation learning model for effective and interpretable job recommendations. Specifically, we first design a hierarchical representation disentangling module to explicitly mine the hierarchical skill-related factors implied in hidden representations of job seekers and jobs. Subsequently, we propose level-aware association modeling to enhance information communication and robust representation learning both inter- and intra-level, which consists of the interlevel knowledge influence module and the level-wise contrastive learning. Finally, we devise an interaction diagnosis module incorporating a neural diagnosis function for effectively modeling the multi-level recruitment interaction process between job seekers and jobs, which introduces the cognitive measurement theory. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.07671v2-abstract-full').style.display = 'none'; document.getElementById('2410.07671v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ICDM 2024. 10 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.04847">arXiv:2410.04847</a> <span> [<a href="https://arxiv.org/pdf/2410.04847">pdf</a>, <a href="https://arxiv.org/format/2410.04847">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Causal Context Adjustment Loss for Learned Image Compression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Han%2C+M">Minghao Han</a>, <a href="/search/cs?searchtype=author&query=Jiang%2C+S">Shiyin Jiang</a>, <a href="/search/cs?searchtype=author&query=Li%2C+S">Shengxi Li</a>, <a href="/search/cs?searchtype=author&query=Deng%2C+X">Xin Deng</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+M">Mai Xu</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Ce Zhu</a>, <a href="/search/cs?searchtype=author&query=Gu%2C+S">Shuhang Gu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.04847v1-abstract-short" style="display: inline;"> In recent years, learned image compression (LIC) technologies have surpassed conventional methods notably in terms of rate-distortion (RD) performance. Most present learned techniques are VAE-based with an autoregressive entropy model, which obviously promotes the RD performance by utilizing the decoded causal context. However, extant methods are highly dependent on the fixed hand-crafted causal c… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04847v1-abstract-full').style.display = 'inline'; document.getElementById('2410.04847v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.04847v1-abstract-full" style="display: none;"> In recent years, learned image compression (LIC) technologies have surpassed conventional methods notably in terms of rate-distortion (RD) performance. Most present learned techniques are VAE-based with an autoregressive entropy model, which obviously promotes the RD performance by utilizing the decoded causal context. However, extant methods are highly dependent on the fixed hand-crafted causal context. The question of how to guide the auto-encoder to generate a more effective causal context benefit for the autoregressive entropy models is worth exploring. In this paper, we make the first attempt in investigating the way to explicitly adjust the causal context with our proposed Causal Context Adjustment loss (CCA-loss). By imposing the CCA-loss, we enable the neural network to spontaneously adjust important information into the early stage of the autoregressive entropy model. Furthermore, as transformer technology develops remarkably, variants of which have been adopted by many state-of-the-art (SOTA) LIC techniques. The existing computing devices have not adapted the calculation of the attention mechanism well, which leads to a burden on computation quantity and inference latency. To overcome it, we establish a convolutional neural network (CNN) image compression model and adopt the unevenly channel-wise grouped strategy for high efficiency. Ultimately, the proposed CNN-based LIC network trained with our Causal Context Adjustment loss attains a great trade-off between inference latency and rate-distortion performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.04847v1-abstract-full').style.display = 'none'; document.getElementById('2410.04847v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to NeurIPS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.01272">arXiv:2410.01272</a> <span> [<a href="https://arxiv.org/pdf/2410.01272">pdf</a>, <a href="https://arxiv.org/format/2410.01272">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> "No Matter What You Do": Purifying GNN Models via Backdoor Unlearning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhang%2C+J">Jiale Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chengcheng Zhu</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+B">Bosen Rao</a>, <a href="/search/cs?searchtype=author&query=Sui%2C+H">Hao Sui</a>, <a href="/search/cs?searchtype=author&query=Sun%2C+X">Xiaobing Sun</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+B">Bing Chen</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+C">Chunyi Zhou</a>, <a href="/search/cs?searchtype=author&query=Ji%2C+S">Shouling Ji</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.01272v2-abstract-short" style="display: inline;"> Recent studies have exposed that GNNs are vulnerable to several adversarial attacks, among which backdoor attack is one of the toughest. Similar to Deep Neural Networks (DNNs), backdoor attacks in GNNs lie in the fact that the attacker modifies a portion of graph data by embedding triggers and enforces the model to learn the trigger feature during the model training process. Despite the massive pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.01272v2-abstract-full').style.display = 'inline'; document.getElementById('2410.01272v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.01272v2-abstract-full" style="display: none;"> Recent studies have exposed that GNNs are vulnerable to several adversarial attacks, among which backdoor attack is one of the toughest. Similar to Deep Neural Networks (DNNs), backdoor attacks in GNNs lie in the fact that the attacker modifies a portion of graph data by embedding triggers and enforces the model to learn the trigger feature during the model training process. Despite the massive prior backdoor defense works on DNNs, defending against backdoor attacks in GNNs is largely unexplored, severely hindering the widespread application of GNNs in real-world tasks. To bridge this gap, we present GCleaner, the first backdoor mitigation method on GNNs. GCleaner can mitigate the presence of the backdoor logic within backdoored GNNs by reversing the backdoor learning procedure, aiming to restore the model performance to a level similar to that is directly trained on the original clean dataset. To achieve this objective, we ask: How to recover universal and hard backdoor triggers in GNNs? How to unlearn the backdoor trigger feature while maintaining the model performance? We conduct the graph trigger recovery via the explanation method to identify optimal trigger locations, facilitating the search of universal and hard backdoor triggers in the feature space of the backdoored model through maximal similarity. Subsequently, we introduce the backdoor unlearning mechanism, which combines knowledge distillation and gradient-based explainable knowledge for fine-grained backdoor erasure. Extensive experimental evaluations on four benchmark datasets demonstrate that GCleaner can reduce the backdoor attack success rate to 10% with only 1% of clean data, and has almost negligible degradation in model performance, which far outperforms the state-of-the-art (SOTA) defense methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.01272v2-abstract-full').style.display = 'none'; document.getElementById('2410.01272v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 12 figures, 9 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.20370">arXiv:2409.20370</a> <span> [<a href="https://arxiv.org/pdf/2409.20370">pdf</a>, <a href="https://arxiv.org/format/2409.20370">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> The Perfect Blend: Redefining RLHF with Mixture of Judges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Xu%2C+T">Tengyu Xu</a>, <a href="/search/cs?searchtype=author&query=Helenowski%2C+E">Eryk Helenowski</a>, <a href="/search/cs?searchtype=author&query=Sankararaman%2C+K+A">Karthik Abinav Sankararaman</a>, <a href="/search/cs?searchtype=author&query=Jin%2C+D">Di Jin</a>, <a href="/search/cs?searchtype=author&query=Peng%2C+K">Kaiyan Peng</a>, <a href="/search/cs?searchtype=author&query=Han%2C+E">Eric Han</a>, <a href="/search/cs?searchtype=author&query=Nie%2C+S">Shaoliang Nie</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+H">Hejia Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+W">Wenxuan Zhou</a>, <a href="/search/cs?searchtype=author&query=Zeng%2C+Z">Zhouhao Zeng</a>, <a href="/search/cs?searchtype=author&query=He%2C+Y">Yun He</a>, <a href="/search/cs?searchtype=author&query=Mandyam%2C+K">Karishma Mandyam</a>, <a href="/search/cs?searchtype=author&query=Talabzadeh%2C+A">Arya Talabzadeh</a>, <a href="/search/cs?searchtype=author&query=Khabsa%2C+M">Madian Khabsa</a>, <a href="/search/cs?searchtype=author&query=Cohen%2C+G">Gabriel Cohen</a>, <a href="/search/cs?searchtype=author&query=Tian%2C+Y">Yuandong Tian</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+H">Hao Ma</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+S">Sinong Wang</a>, <a href="/search/cs?searchtype=author&query=Fang%2C+H">Han Fang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.20370v1-abstract-short" style="display: inline;"> Reinforcement learning from human feedback (RLHF) has become the leading approach for fine-tuning large language models (LLM). However, RLHF has limitations in multi-task learning (MTL) due to challenges of reward hacking and extreme multi-objective optimization (i.e., trade-off of multiple and/or sometimes conflicting objectives). Applying RLHF for MTL currently requires careful tuning of the wei… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20370v1-abstract-full').style.display = 'inline'; document.getElementById('2409.20370v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.20370v1-abstract-full" style="display: none;"> Reinforcement learning from human feedback (RLHF) has become the leading approach for fine-tuning large language models (LLM). However, RLHF has limitations in multi-task learning (MTL) due to challenges of reward hacking and extreme multi-objective optimization (i.e., trade-off of multiple and/or sometimes conflicting objectives). Applying RLHF for MTL currently requires careful tuning of the weights for reward model and data combinations. This is often done via human intuition and does not generalize. In this work, we introduce a novel post-training paradigm which we called Constrained Generative Policy Optimization (CGPO). The core of CGPO is Mixture of Judges (MoJ) with cost-efficient constrained policy optimization with stratification, which can identify the perfect blend in RLHF in a principled manner. It shows strong empirical results with theoretical guarantees, does not require extensive hyper-parameter tuning, and is plug-and-play in common post-training pipelines. Together, this can detect and mitigate reward hacking behaviors while reaching a pareto-optimal point across an extremely large number of objectives. Our empirical evaluations demonstrate that CGPO significantly outperforms standard RLHF algorithms like PPO and DPO across various tasks including general chat, STEM questions, instruction following, and coding. Specifically, CGPO shows improvements of 7.4% in AlpacaEval-2 (general chat), 12.5% in Arena-Hard (STEM & reasoning), and consistent gains in other domains like math and coding. Notably, PPO, while commonly used, is prone to severe reward hacking in popular coding benchmarks, which CGPO successfully addresses. This breakthrough in RLHF not only tackles reward hacking and extreme multi-objective optimization challenges but also advances the state-of-the-art in aligning general-purpose LLMs for diverse applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20370v1-abstract-full').style.display = 'none'; document.getElementById('2409.20370v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.19951">arXiv:2409.19951</a> <span> [<a href="https://arxiv.org/pdf/2409.19951">pdf</a>, <a href="https://arxiv.org/format/2409.19951">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Law of the Weakest Link: Cross Capabilities of Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhong%2C+M">Ming Zhong</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+A">Aston Zhang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+X">Xuewei Wang</a>, <a href="/search/cs?searchtype=author&query=Hou%2C+R">Rui Hou</a>, <a href="/search/cs?searchtype=author&query=Xiong%2C+W">Wenhan Xiong</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenguang Zhu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+Z">Zhengxing Chen</a>, <a href="/search/cs?searchtype=author&query=Tan%2C+L">Liang Tan</a>, <a href="/search/cs?searchtype=author&query=Bi%2C+C">Chloe Bi</a>, <a href="/search/cs?searchtype=author&query=Lewis%2C+M">Mike Lewis</a>, <a href="/search/cs?searchtype=author&query=Popuri%2C+S">Sravya Popuri</a>, <a href="/search/cs?searchtype=author&query=Narang%2C+S">Sharan Narang</a>, <a href="/search/cs?searchtype=author&query=Kambadur%2C+M">Melanie Kambadur</a>, <a href="/search/cs?searchtype=author&query=Mahajan%2C+D">Dhruv Mahajan</a>, <a href="/search/cs?searchtype=author&query=Edunov%2C+S">Sergey Edunov</a>, <a href="/search/cs?searchtype=author&query=Han%2C+J">Jiawei Han</a>, <a href="/search/cs?searchtype=author&query=van+der+Maaten%2C+L">Laurens van der Maaten</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.19951v2-abstract-short" style="display: inline;"> The development and evaluation of Large Language Models (LLMs) have largely focused on individual capabilities. However, this overlooks the intersection of multiple abilities across different types of expertise that are often required for real-world tasks, which we term cross capabilities. To systematically explore this concept, we first define seven core individual capabilities and then pair them… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19951v2-abstract-full').style.display = 'inline'; document.getElementById('2409.19951v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.19951v2-abstract-full" style="display: none;"> The development and evaluation of Large Language Models (LLMs) have largely focused on individual capabilities. However, this overlooks the intersection of multiple abilities across different types of expertise that are often required for real-world tasks, which we term cross capabilities. To systematically explore this concept, we first define seven core individual capabilities and then pair them to form seven common cross capabilities, each supported by a manually constructed taxonomy. Building on these definitions, we introduce CrossEval, a benchmark comprising 1,400 human-annotated prompts, with 100 prompts for each individual and cross capability. To ensure reliable evaluation, we involve expert annotators to assess 4,200 model responses, gathering 8,400 human ratings with detailed explanations to serve as reference examples. Our findings reveal that, in both static evaluations and attempts to enhance specific abilities, current LLMs consistently exhibit the "Law of the Weakest Link," where cross-capability performance is significantly constrained by the weakest component. Specifically, across 58 cross-capability scores from 17 models, 38 scores are lower than all individual capabilities, while 20 fall between strong and weak, but closer to the weaker ability. These results highlight the under-performance of LLMs in cross-capability tasks, making the identification and improvement of the weakest capabilities a critical priority for future research to optimize performance in complex, multi-dimensional scenarios. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19951v2-abstract-full').style.display = 'none'; document.getElementById('2409.19951v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Data, Code, & Benchmark: www.llm-cross-capabilities.org</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.19601">arXiv:2409.19601</a> <span> [<a href="https://arxiv.org/pdf/2409.19601">pdf</a>, <a href="https://arxiv.org/format/2409.19601">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Infighting in the Dark: Multi-Labels Backdoor Attack in Federated Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+Y">Ye Li</a>, <a href="/search/cs?searchtype=author&query=Zhao%2C+Y">Yanchao Zhao</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chengcheng Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+J">Jiale Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.19601v2-abstract-short" style="display: inline;"> Federated Learning (FL), a privacy-preserving decentralized machine learning framework, has been shown to be vulnerable to backdoor attacks. Current research primarily focuses on the Single-Label Backdoor Attack (SBA), wherein adversaries share a consistent target. However, a critical fact is overlooked: adversaries may be non-cooperative, have distinct targets, and operate independently, which ex… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19601v2-abstract-full').style.display = 'inline'; document.getElementById('2409.19601v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.19601v2-abstract-full" style="display: none;"> Federated Learning (FL), a privacy-preserving decentralized machine learning framework, has been shown to be vulnerable to backdoor attacks. Current research primarily focuses on the Single-Label Backdoor Attack (SBA), wherein adversaries share a consistent target. However, a critical fact is overlooked: adversaries may be non-cooperative, have distinct targets, and operate independently, which exhibits a more practical scenario called Multi-Label Backdoor Attack (MBA). Unfortunately, prior works are ineffective in MBA scenario since non-cooperative attackers exclude each other. In this work, we conduct an in-depth investigation to uncover the inherent constraints of the exclusion: similar backdoor mappings are constructed for different targets, resulting in conflicts among backdoor functions. To address this limitation, we propose Mirage, the first non-cooperative MBA strategy in FL that allows attackers to inject effective and persistent backdoors into the global model without collusion by constructing in-distribution (ID) backdoor mapping. Specifically, we introduce an adversarial adaptation method to bridge the backdoor features and the target distribution in an ID manner. Additionally, we further leverage a constrained optimization method to ensure the ID mapping survives in the global training dynamics. Extensive evaluations demonstrate that Mirage outperforms various state-of-the-art attacks and bypasses existing defenses, achieving an average ASR greater than 97\% and maintaining over 90\% after 900 rounds. This work aims to alert researchers to this potential threat and inspire the design of effective defense mechanisms. Code has been made open-source. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19601v2-abstract-full').style.display = 'none'; document.getElementById('2409.19601v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.19527">arXiv:2409.19527</a> <span> [<a href="https://arxiv.org/pdf/2409.19527">pdf</a>, <a href="https://arxiv.org/format/2409.19527">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> BuildingView: Constructing Urban Building Exteriors Databases with Street View Imagery and Multimodal Large Language Mode </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+Z">Zongrong Li</a>, <a href="/search/cs?searchtype=author&query=Su%2C+Y">Yunlei Su</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenyuan Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhao%2C+W">Wufan Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.19527v1-abstract-short" style="display: inline;"> Urban Building Exteriors are increasingly important in urban analytics, driven by advancements in Street View Imagery and its integration with urban research. Multimodal Large Language Models (LLMs) offer powerful tools for urban annotation, enabling deeper insights into urban environments. However, challenges remain in creating accurate and detailed urban building exterior databases, identifying… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19527v1-abstract-full').style.display = 'inline'; document.getElementById('2409.19527v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.19527v1-abstract-full" style="display: none;"> Urban Building Exteriors are increasingly important in urban analytics, driven by advancements in Street View Imagery and its integration with urban research. Multimodal Large Language Models (LLMs) offer powerful tools for urban annotation, enabling deeper insights into urban environments. However, challenges remain in creating accurate and detailed urban building exterior databases, identifying critical indicators for energy efficiency, environmental sustainability, and human-centric design, and systematically organizing these indicators. To address these challenges, we propose BuildingView, a novel approach that integrates high-resolution visual data from Google Street View with spatial information from OpenStreetMap via the Overpass API. This research improves the accuracy of urban building exterior data, identifies key sustainability and design indicators, and develops a framework for their extraction and categorization. Our methodology includes a systematic literature review, building and Street View sampling, and annotation using the ChatGPT-4O API. The resulting database, validated with data from New York City, Amsterdam, and Singapore, provides a comprehensive tool for urban studies, supporting informed decision-making in urban planning, architectural design, and environmental policy. The code for BuildingView is available at https://github.com/Jasper0122/BuildingView. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.19527v1-abstract-full').style.display = 'none'; document.getElementById('2409.19527v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.18125">arXiv:2409.18125</a> <span> [<a href="https://arxiv.org/pdf/2409.18125">pdf</a>, <a href="https://arxiv.org/format/2409.18125">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LLaVA-3D: A Simple yet Effective Pathway to Empowering LMMs with 3D-awareness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenming Zhu</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Tai Wang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+W">Wenwei Zhang</a>, <a href="/search/cs?searchtype=author&query=Pang%2C+J">Jiangmiao Pang</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+X">Xihui Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.18125v1-abstract-short" style="display: inline;"> Recent advancements in Large Multimodal Models (LMMs) have greatly enhanced their proficiency in 2D visual understanding tasks, enabling them to effectively process and understand images and videos. However, the development of LMMs with 3D-awareness for 3D scene understanding has been hindered by the lack of large-scale 3D vision-language datasets and powerful 3D encoders. In this paper, we introd… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.18125v1-abstract-full').style.display = 'inline'; document.getElementById('2409.18125v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.18125v1-abstract-full" style="display: none;"> Recent advancements in Large Multimodal Models (LMMs) have greatly enhanced their proficiency in 2D visual understanding tasks, enabling them to effectively process and understand images and videos. However, the development of LMMs with 3D-awareness for 3D scene understanding has been hindered by the lack of large-scale 3D vision-language datasets and powerful 3D encoders. In this paper, we introduce a simple yet effective framework called LLaVA-3D. Leveraging the strong 2D understanding priors from LLaVA, our LLaVA-3D efficiently adapts LLaVA for 3D scene understanding without compromising 2D understanding capabilities. To achieve this, we employ a simple yet effective representation, 3D Patch, which connects 2D CLIP patch features with their corresponding positions in 3D space. By integrating the 3D Patches into 2D LMMs and employing joint 2D and 3D vision-language instruction tuning, we establish a unified architecture for both 2D image understanding and 3D scene understanding. Experimental results show that LLaVA-3D converges 3.5x faster than existing 3D LMMs when trained on 3D vision-language datasets. Moreover, LLaVA-3D not only achieves state-of-the-art performance across various 3D tasks but also maintains comparable 2D image understanding and vision-language conversation capabilities with LLaVA. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.18125v1-abstract-full').style.display = 'none'; document.getElementById('2409.18125v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project page: https://zcmax.github.io/projects/LLaVA-3D/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16788">arXiv:2409.16788</a> <span> [<a href="https://arxiv.org/pdf/2409.16788">pdf</a>, <a href="https://arxiv.org/format/2409.16788">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Mitigating the Bias of Large Language Model Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhou%2C+H">Hongli Zhou</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+H">Hui Huang</a>, <a href="/search/cs?searchtype=author&query=Long%2C+Y">Yunfei Long</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+B">Bing Xu</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Conghui Zhu</a>, <a href="/search/cs?searchtype=author&query=Cao%2C+H">Hailong Cao</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+M">Muyun Yang</a>, <a href="/search/cs?searchtype=author&query=Zhao%2C+T">Tiejun Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16788v1-abstract-short" style="display: inline;"> Recently, there has been a trend of evaluating the Large Language Model (LLM) quality in the flavor of LLM-as-a-Judge, namely leveraging another LLM to evaluate the current output quality. However, existing judges are proven to be biased, namely they would favor answers which present better superficial quality (such as verbosity, fluency) while ignoring the instruction following ability. In this w… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16788v1-abstract-full').style.display = 'inline'; document.getElementById('2409.16788v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16788v1-abstract-full" style="display: none;"> Recently, there has been a trend of evaluating the Large Language Model (LLM) quality in the flavor of LLM-as-a-Judge, namely leveraging another LLM to evaluate the current output quality. However, existing judges are proven to be biased, namely they would favor answers which present better superficial quality (such as verbosity, fluency) while ignoring the instruction following ability. In this work, we propose systematic research about the bias of LLM-as-a-Judge. Specifically, for closed-source judge models, we apply calibration to mitigate the significance of superficial quality, both on probability level and prompt level. For open-source judge models, we propose to mitigate the bias by contrastive training, with curated negative samples that deviate from instruction but present better superficial quality. We apply our methods on the bias evaluation benchmark, and experiment results show our methods mitigate the bias by a large margin while maintaining a satisfactory evaluation accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16788v1-abstract-full').style.display = 'none'; document.getElementById('2409.16788v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16136">arXiv:2409.16136</a> <span> [<a href="https://arxiv.org/pdf/2409.16136">pdf</a>, <a href="https://arxiv.org/format/2409.16136">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> HA-FGOVD: Highlighting Fine-grained Attributes via Explicit Linear Composition for Open-Vocabulary Object Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ma%2C+Y">Yuqi Ma</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+M">Mengyin Liu</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chao Zhu</a>, <a href="/search/cs?searchtype=author&query=Yin%2C+X">Xu-Cheng Yin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16136v1-abstract-short" style="display: inline;"> Open-vocabulary object detection (OVD) models are considered to be Large Multi-modal Models (LMM), due to their extensive training data and a large number of parameters. Mainstream OVD models prioritize object coarse-grained category rather than focus on their fine-grained attributes, e.g., colors or materials, thus failed to identify objects specified with certain attributes. However, OVD models… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16136v1-abstract-full').style.display = 'inline'; document.getElementById('2409.16136v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16136v1-abstract-full" style="display: none;"> Open-vocabulary object detection (OVD) models are considered to be Large Multi-modal Models (LMM), due to their extensive training data and a large number of parameters. Mainstream OVD models prioritize object coarse-grained category rather than focus on their fine-grained attributes, e.g., colors or materials, thus failed to identify objects specified with certain attributes. However, OVD models are pretrained on large-scale image-text pairs with rich attribute words, whose latent feature space can represent the global text feature as a linear composition of fine-grained attribute tokens without highlighting them. Therefore, we propose in this paper a universal and explicit approach for frozen mainstream OVD models that boosts their attribute-level detection capabilities by highlighting fine-grained attributes in explicit linear space. Firstly, a LLM is leveraged to highlight attribute words within the input text as a zero-shot prompted task. Secondly, by strategically adjusting the token masks, the text encoders of OVD models extract both global text and attribute-specific features, which are then explicitly composited as two vectors in linear space to form the new attribute-highlighted feature for detection tasks, where corresponding scalars are hand-crafted or learned to reweight both two vectors. Notably, these scalars can be seamlessly transferred among different OVD models, which proves that such an explicit linear composition is universal. Empirical evaluation on the FG-OVD dataset demonstrates that our proposed method uniformly improves fine-grained attribute-level OVD of various mainstream models and achieves new state-of-the-art performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16136v1-abstract-full').style.display = 'none'; document.getElementById('2409.16136v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work has been submitted to the IEEE for possible publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.06209">arXiv:2409.06209</a> <span> [<a href="https://arxiv.org/pdf/2409.06209">pdf</a>, <a href="https://arxiv.org/format/2409.06209">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Adaptive Transformer Modelling of Density Function for Nonparametric Survival Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhang%2C+X">Xin Zhang</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+D">Deval Mehta</a>, <a href="/search/cs?searchtype=author&query=Hu%2C+Y">Yanan Hu</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chao Zhu</a>, <a href="/search/cs?searchtype=author&query=Darby%2C+D">David Darby</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+Z">Zhen Yu</a>, <a href="/search/cs?searchtype=author&query=Merlo%2C+D">Daniel Merlo</a>, <a href="/search/cs?searchtype=author&query=Gresle%2C+M">Melissa Gresle</a>, <a href="/search/cs?searchtype=author&query=Van+Der+Walt%2C+A">Anneke Van Der Walt</a>, <a href="/search/cs?searchtype=author&query=Butzkueven%2C+H">Helmut Butzkueven</a>, <a href="/search/cs?searchtype=author&query=Ge%2C+Z">Zongyuan Ge</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.06209v1-abstract-short" style="display: inline;"> Survival analysis holds a crucial role across diverse disciplines, such as economics, engineering and healthcare. It empowers researchers to analyze both time-invariant and time-varying data, encompassing phenomena like customer churn, material degradation and various medical outcomes. Given the complexity and heterogeneity of such data, recent endeavors have demonstrated successful integration of… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06209v1-abstract-full').style.display = 'inline'; document.getElementById('2409.06209v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.06209v1-abstract-full" style="display: none;"> Survival analysis holds a crucial role across diverse disciplines, such as economics, engineering and healthcare. It empowers researchers to analyze both time-invariant and time-varying data, encompassing phenomena like customer churn, material degradation and various medical outcomes. Given the complexity and heterogeneity of such data, recent endeavors have demonstrated successful integration of deep learning methodologies to address limitations in conventional statistical approaches. However, current methods typically involve cluttered probability distribution function (PDF), have lower sensitivity in censoring prediction, only model static datasets, or only rely on recurrent neural networks for dynamic modelling. In this paper, we propose a novel survival regression method capable of producing high-quality unimodal PDFs without any prior distribution assumption, by optimizing novel Margin-Mean-Variance loss and leveraging the flexibility of Transformer to handle both temporal and non-temporal data, coined UniSurv. Extensive experiments on several datasets demonstrate that UniSurv places a significantly higher emphasis on censoring compared to other methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06209v1-abstract-full').style.display = 'none'; document.getElementById('2409.06209v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.04979">arXiv:2409.04979</a> <span> [<a href="https://arxiv.org/pdf/2409.04979">pdf</a>, <a href="https://arxiv.org/format/2409.04979">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> RCBEVDet++: Toward High-accuracy Radar-Camera Fusion 3D Perception Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lin%2C+Z">Zhiwei Lin</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Z">Zhe Liu</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yongtao Wang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+L">Le Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Ce Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.04979v1-abstract-short" style="display: inline;"> Perceiving the surrounding environment is a fundamental task in autonomous driving. To obtain highly accurate perception results, modern autonomous driving systems typically employ multi-modal sensors to collect comprehensive environmental data. Among these, the radar-camera multi-modal perception system is especially favored for its excellent sensing capabilities and cost-effectiveness. However,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.04979v1-abstract-full').style.display = 'inline'; document.getElementById('2409.04979v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.04979v1-abstract-full" style="display: none;"> Perceiving the surrounding environment is a fundamental task in autonomous driving. To obtain highly accurate perception results, modern autonomous driving systems typically employ multi-modal sensors to collect comprehensive environmental data. Among these, the radar-camera multi-modal perception system is especially favored for its excellent sensing capabilities and cost-effectiveness. However, the substantial modality differences between radar and camera sensors pose challenges in fusing information. To address this problem, this paper presents RCBEVDet, a radar-camera fusion 3D object detection framework. Specifically, RCBEVDet is developed from an existing camera-based 3D object detector, supplemented by a specially designed radar feature extractor, RadarBEVNet, and a Cross-Attention Multi-layer Fusion (CAMF) module. Firstly, RadarBEVNet encodes sparse radar points into a dense bird's-eye-view (BEV) feature using a dual-stream radar backbone and a Radar Cross Section aware BEV encoder. Secondly, the CAMF module utilizes a deformable attention mechanism to align radar and camera BEV features and adopts channel and spatial fusion layers to fuse them. To further enhance RCBEVDet's capabilities, we introduce RCBEVDet++, which advances the CAMF through sparse fusion, supports query-based multi-view camera perception models, and adapts to a broader range of perception tasks. Extensive experiments on the nuScenes show that our method integrates seamlessly with existing camera-based 3D perception models and improves their performance across various perception tasks. Furthermore, our method achieves state-of-the-art radar-camera fusion results in 3D object detection, BEV semantic segmentation, and 3D multi-object tracking tasks. Notably, with ViT-L as the image backbone, RCBEVDet++ achieves 72.73 NDS and 67.34 mAP in 3D object detection without test-time augmentation or model ensembling. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.04979v1-abstract-full').style.display = 'none'; document.getElementById('2409.04979v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The extended work of RCBEVDet (CVPR2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.03223">arXiv:2409.03223</a> <span> [<a href="https://arxiv.org/pdf/2409.03223">pdf</a>, <a href="https://arxiv.org/format/2409.03223">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Why mamba is effective? Exploit Linear Transformer-Mamba Network for Multi-Modality Image Fusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenguang Zhu</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+S">Shan Gao</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+H">Huafeng Chen</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+G">Guangqian Guo</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+C">Chaowei Wang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yaoxing Wang</a>, <a href="/search/cs?searchtype=author&query=Lei%2C+C+S">Chen Shu Lei</a>, <a href="/search/cs?searchtype=author&query=Fan%2C+Q">Quanjiang Fan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.03223v1-abstract-short" style="display: inline;"> Multi-modality image fusion aims to integrate the merits of images from different sources and render high-quality fusion images. However, existing feature extraction and fusion methods are either constrained by inherent local reduction bias and static parameters during inference (CNN) or limited by quadratic computational complexity (Transformers), and cannot effectively extract and fuse features.… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.03223v1-abstract-full').style.display = 'inline'; document.getElementById('2409.03223v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.03223v1-abstract-full" style="display: none;"> Multi-modality image fusion aims to integrate the merits of images from different sources and render high-quality fusion images. However, existing feature extraction and fusion methods are either constrained by inherent local reduction bias and static parameters during inference (CNN) or limited by quadratic computational complexity (Transformers), and cannot effectively extract and fuse features. To solve this problem, we propose a dual-branch image fusion network called Tmamba. It consists of linear Transformer and Mamba, which has global modeling capabilities while maintaining linear complexity. Due to the difference between the Transformer and Mamba structures, the features extracted by the two branches carry channel and position information respectively. T-M interaction structure is designed between the two branches, using global learnable parameters and convolutional layers to transfer position and channel information respectively. We further propose cross-modal interaction at the attention level to obtain cross-modal attention. Experiments show that our Tmamba achieves promising results in multiple fusion tasks, including infrared-visible image fusion and medical image fusion. Code with checkpoints will be available after the peer-review process. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.03223v1-abstract-full').style.display = 'none'; document.getElementById('2409.03223v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.16690">arXiv:2408.16690</a> <span> [<a href="https://arxiv.org/pdf/2408.16690">pdf</a>, <a href="https://arxiv.org/format/2408.16690">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Generic Objects as Pose Probes for Few-Shot View Synthesis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gao%2C+Z">Zhirui Gao</a>, <a href="/search/cs?searchtype=author&query=Yi%2C+R">Renjiao Yi</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenyang Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhuang%2C+K">Ke Zhuang</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+W">Wei Chen</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+K">Kai Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.16690v2-abstract-short" style="display: inline;"> Radiance fields including NeRFs and 3D Gaussians demonstrate great potential in high-fidelity rendering and scene reconstruction, while they require a substantial number of posed images as inputs. COLMAP is frequently employed for preprocessing to estimate poses, while it necessitates a large number of feature matches to operate effectively, and it struggles with scenes characterized by sparse fea… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.16690v2-abstract-full').style.display = 'inline'; document.getElementById('2408.16690v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.16690v2-abstract-full" style="display: none;"> Radiance fields including NeRFs and 3D Gaussians demonstrate great potential in high-fidelity rendering and scene reconstruction, while they require a substantial number of posed images as inputs. COLMAP is frequently employed for preprocessing to estimate poses, while it necessitates a large number of feature matches to operate effectively, and it struggles with scenes characterized by sparse features, large baselines between images, or a limited number of input images. We aim to tackle few-view NeRF reconstruction using only 3 to 6 unposed scene images. Traditional methods often use calibration boards but they are not common in images. We propose a novel idea of utilizing everyday objects, commonly found in both images and real life, as "pose probes". The probe object is automatically segmented by SAM, whose shape is initialized from a cube. We apply a dual-branch volume rendering optimization (object NeRF and scene NeRF) to constrain the pose optimization and jointly refine the geometry. Specifically, object poses of two views are first estimated by PnP matching in an SDF representation, which serves as initial poses. PnP matching, requiring only a few features, is suitable for feature-sparse scenes. Additional views are incrementally incorporated to refine poses from preceding views. In experiments, PoseProbe achieves state-of-the-art performance in both pose estimation and novel view synthesis across multiple datasets. We demonstrate its effectiveness, particularly in few-view and large-baseline scenes where COLMAP struggles. In ablations, using different objects in a scene yields comparable performance. Our project page is available at: \href{https://zhirui-gao.github.io/PoseProbe.github.io/}{this https URL} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.16690v2-abstract-full').style.display = 'none'; document.getElementById('2408.16690v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.12093">arXiv:2408.12093</a> <span> [<a href="https://arxiv.org/pdf/2408.12093">pdf</a>, <a href="https://arxiv.org/format/2408.12093">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LLM-enhanced Scene Graph Learning for Household Rearrangement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+W">Wenhao Li</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+Z">Zhiyuan Yu</a>, <a href="/search/cs?searchtype=author&query=She%2C+Q">Qijin She</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+Z">Zhinan Yu</a>, <a href="/search/cs?searchtype=author&query=Lan%2C+Y">Yuqing Lan</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenyang Zhu</a>, <a href="/search/cs?searchtype=author&query=Hu%2C+R">Ruizhen Hu</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+K">Kai Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.12093v2-abstract-short" style="display: inline;"> The household rearrangement task involves spotting misplaced objects in a scene and accommodate them with proper places. It depends both on common-sense knowledge on the objective side and human user preference on the subjective side. In achieving such task, we propose to mine object functionality with user preference alignment directly from the scene itself, without relying on human intervention.… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12093v2-abstract-full').style.display = 'inline'; document.getElementById('2408.12093v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.12093v2-abstract-full" style="display: none;"> The household rearrangement task involves spotting misplaced objects in a scene and accommodate them with proper places. It depends both on common-sense knowledge on the objective side and human user preference on the subjective side. In achieving such task, we propose to mine object functionality with user preference alignment directly from the scene itself, without relying on human intervention. To do so, we work with scene graph representation and propose LLM-enhanced scene graph learning which transforms the input scene graph into an affordance-enhanced graph (AEG) with information-enhanced nodes and newly discovered edges (relations). In AEG, the nodes corresponding to the receptacle objects are augmented with context-induced affordance which encodes what kind of carriable objects can be placed on it. New edges are discovered with newly discovered non-local relations. With AEG, we perform task planning for scene rearrangement by detecting misplaced carriables and determining a proper placement for each of them. We test our method by implementing a tiding robot in simulator and perform evaluation on a new benchmark we build. Extensive evaluations demonstrate that our method achieves state-of-the-art performance on misplacement detection and the following rearrangement planning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12093v2-abstract-full').style.display = 'none'; document.getElementById('2408.12093v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">SIGGRAPH ASIA 2024 conference accepted</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.11826">arXiv:2408.11826</a> <span> [<a href="https://arxiv.org/pdf/2408.11826">pdf</a>, <a href="https://arxiv.org/format/2408.11826">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Generative Organizational Behavior Simulation using Large Language Model based Autonomous Agents: A Holacracy Perspective </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Cheng%2C+Y">Yihang Cheng</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+J">Jingshuai Zhang</a>, <a href="/search/cs?searchtype=author&query=Qiu%2C+Y">Yusheng Qiu</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+S">Sitao Xia</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+H">Hengshu Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.11826v1-abstract-short" style="display: inline;"> In this paper, we present the technical details and periodic findings of our project, CareerAgent, which aims to build a generative simulation framework for a Holacracy organization using Large Language Model-based Autonomous Agents. Specifically, the simulation framework includes three phases: construction, execution, and evaluation, and it incorporates basic characteristics of individuals, organ… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11826v1-abstract-full').style.display = 'inline'; document.getElementById('2408.11826v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.11826v1-abstract-full" style="display: none;"> In this paper, we present the technical details and periodic findings of our project, CareerAgent, which aims to build a generative simulation framework for a Holacracy organization using Large Language Model-based Autonomous Agents. Specifically, the simulation framework includes three phases: construction, execution, and evaluation, and it incorporates basic characteristics of individuals, organizations, tasks, and meetings. Through our simulation, we obtained several interesting findings. At the organizational level, an increase in the average values of management competence and functional competence can reduce overall members' stress levels, but it negatively impacts deeper organizational performance measures such as average task completion. At the individual level, both competences can improve members' work performance. From the analysis of social networks, we found that highly competent members selectively participate in certain tasks and take on more responsibilities. Over time, small sub-communities form around these highly competent members within the holacracy. These findings contribute theoretically to the study of organizational science and provide practical insights for managers to understand the organization dynamics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11826v1-abstract-full').style.display = 'none'; document.getElementById('2408.11826v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.11312">arXiv:2408.11312</a> <span> [<a href="https://arxiv.org/pdf/2408.11312">pdf</a>, <a href="https://arxiv.org/format/2408.11312">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Swarm Intelligence in Geo-Localization: A Multi-Agent Large Vision-Language Model Collaborative Framework </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Han%2C+X">Xiao Han</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhao%2C+X">Xiangyu Zhao</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+H">Hengshu Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.11312v3-abstract-short" style="display: inline;"> Visual geo-localization demands in-depth knowledge and advanced reasoning skills to associate images with precise real-world geographic locations. Existing image database retrieval methods are limited by the impracticality of storing sufficient visual records of global landmarks. Recently, Large Vision-Language Models (LVLMs) have demonstrated the capability of geo-localization through Visual Ques… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11312v3-abstract-full').style.display = 'inline'; document.getElementById('2408.11312v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.11312v3-abstract-full" style="display: none;"> Visual geo-localization demands in-depth knowledge and advanced reasoning skills to associate images with precise real-world geographic locations. Existing image database retrieval methods are limited by the impracticality of storing sufficient visual records of global landmarks. Recently, Large Vision-Language Models (LVLMs) have demonstrated the capability of geo-localization through Visual Question Answering (VQA), enabling a solution that does not require external geo-tagged image records. However, the performance of a single LVLM is still limited by its intrinsic knowledge and reasoning capabilities. To address these challenges, we introduce smileGeo, a novel visual geo-localization framework that leverages multiple Internet-enabled LVLM agents operating within an agent-based architecture. By facilitating inter-agent communication, smileGeo integrates the inherent knowledge of these agents with additional retrieved information, enhancing the ability to effectively localize images. Furthermore, our framework incorporates a dynamic learning strategy that optimizes agent communication, reducing redundant interactions and enhancing overall system efficiency. To validate the effectiveness of the proposed framework, we conducted experiments on three different datasets, and the results show that our approach significantly outperforms current state-of-the-art methods. The source code is available at https://anonymous.4open.science/r/ViusalGeoLocalization-F8F5. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11312v3-abstract-full').style.display = 'none'; document.getElementById('2408.11312v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">resubmit to www2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.10789">arXiv:2408.10789</a> <span> [<a href="https://arxiv.org/pdf/2408.10789">pdf</a>, <a href="https://arxiv.org/format/2408.10789">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Learning Part-aware 3D Representations by Fusing 2D Gaussians and Superquadrics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gao%2C+Z">Zhirui Gao</a>, <a href="/search/cs?searchtype=author&query=Yi%2C+R">Renjiao Yi</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+Y">Yuhang Huang</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+W">Wei Chen</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenyang Zhu</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+K">Kai Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.10789v1-abstract-short" style="display: inline;"> Low-level 3D representations, such as point clouds, meshes, NeRFs, and 3D Gaussians, are commonly used to represent 3D objects or scenes. However, humans usually perceive 3D objects or scenes at a higher level as a composition of parts or structures rather than points or voxels. Representing 3D as semantic parts can benefit further understanding and applications. We aim to solve part-aware 3D reco… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10789v1-abstract-full').style.display = 'inline'; document.getElementById('2408.10789v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.10789v1-abstract-full" style="display: none;"> Low-level 3D representations, such as point clouds, meshes, NeRFs, and 3D Gaussians, are commonly used to represent 3D objects or scenes. However, humans usually perceive 3D objects or scenes at a higher level as a composition of parts or structures rather than points or voxels. Representing 3D as semantic parts can benefit further understanding and applications. We aim to solve part-aware 3D reconstruction, which parses objects or scenes into semantic parts. In this paper, we introduce a hybrid representation of superquadrics and 2D Gaussians, trying to dig 3D structural clues from multi-view image inputs. Accurate structured geometry reconstruction and high-quality rendering are achieved at the same time. We incorporate parametric superquadrics in mesh forms into 2D Gaussians by attaching Gaussian centers to faces in meshes. During the training, superquadrics parameters are iteratively optimized, and Gaussians are deformed accordingly, resulting in an efficient hybrid representation. On the one hand, this hybrid representation inherits the advantage of superquadrics to represent different shape primitives, supporting flexible part decomposition of scenes. On the other hand, 2D Gaussians are incorporated to model the complex texture and geometry details, ensuring high-quality rendering and geometry reconstruction. The reconstruction is fully unsupervised. We conduct extensive experiments on data from DTU and ShapeNet datasets, in which the method decomposes scenes into reasonable parts, outperforming existing state-of-the-art approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10789v1-abstract-full').style.display = 'none'; document.getElementById('2408.10789v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.08978">arXiv:2408.08978</a> <span> [<a href="https://arxiv.org/pdf/2408.08978">pdf</a>, <a href="https://arxiv.org/format/2408.08978">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> See What LLMs Cannot Answer: A Self-Challenge Framework for Uncovering LLM Weaknesses </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+Y">Yulong Chen</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Y">Yang Liu</a>, <a href="/search/cs?searchtype=author&query=Yan%2C+J">Jianhao Yan</a>, <a href="/search/cs?searchtype=author&query=Bai%2C+X">Xuefeng Bai</a>, <a href="/search/cs?searchtype=author&query=Zhong%2C+M">Ming Zhong</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+Y">Yinghao Yang</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+Z">Ziyi Yang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenguang Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Y">Yue Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.08978v2-abstract-short" style="display: inline;"> The impressive performance of Large Language Models (LLMs) has consistently surpassed numerous human-designed benchmarks, presenting new challenges in assessing the shortcomings of LLMs. Designing tasks and finding LLMs' limitations are becoming increasingly important. In this paper, we investigate the question of whether an LLM can discover its own limitations from the errors it makes. To this en… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.08978v2-abstract-full').style.display = 'inline'; document.getElementById('2408.08978v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.08978v2-abstract-full" style="display: none;"> The impressive performance of Large Language Models (LLMs) has consistently surpassed numerous human-designed benchmarks, presenting new challenges in assessing the shortcomings of LLMs. Designing tasks and finding LLMs' limitations are becoming increasingly important. In this paper, we investigate the question of whether an LLM can discover its own limitations from the errors it makes. To this end, we propose a Self-Challenge evaluation framework with human-in-the-loop. Starting from seed instances that GPT-4 fails to answer, we prompt GPT-4 to summarize error patterns that can be used to generate new instances and incorporate human feedback on them to refine these patterns for generating more challenging data, iteratively. We end up with 8 diverse patterns, such as text manipulation and questions with assumptions. We then build a benchmark, SC-G4, consisting of 1,835 instances generated by GPT-4 using these patterns, with human-annotated gold responses. The SC-G4 serves as a challenging benchmark that allows for a detailed assessment of LLMs' abilities. Our results show that only 44.96\% of instances in SC-G4 can be answered correctly by GPT-4. Interestingly, our pilot study indicates that these error patterns also challenge other LLMs, such as Claude-3 and Llama-3, and cannot be fully resolved through fine-tuning. Our work takes the first step to demonstrate that LLMs can autonomously identify their inherent flaws and provide insights for future dynamic and automatic evaluation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.08978v2-abstract-full').style.display = 'none'; document.getElementById('2408.08978v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">COLM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.07907">arXiv:2408.07907</a> <span> [<a href="https://arxiv.org/pdf/2408.07907">pdf</a>, <a href="https://arxiv.org/format/2408.07907">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3640457.3688136">10.1145/3640457.3688136 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> AIE: Auction Information Enhanced Framework for CTR Prediction in Online Advertising </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yang%2C+Y">Yang Yang</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+B">Bo Chen</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenxu Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+M">Menghui Zhu</a>, <a href="/search/cs?searchtype=author&query=Dai%2C+X">Xinyi Dai</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+H">Huifeng Guo</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+M">Muyu Zhang</a>, <a href="/search/cs?searchtype=author&query=Dong%2C+Z">Zhenhua Dong</a>, <a href="/search/cs?searchtype=author&query=Tang%2C+R">Ruiming Tang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.07907v1-abstract-short" style="display: inline;"> Click-Through Rate (CTR) prediction is a fundamental technique for online advertising recommendation and the complex online competitive auction process also brings many difficulties to CTR optimization. Recent studies have shown that introducing posterior auction information contributes to the performance of CTR prediction. However, existing work doesn't fully capitalize on the benefits of auction… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07907v1-abstract-full').style.display = 'inline'; document.getElementById('2408.07907v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.07907v1-abstract-full" style="display: none;"> Click-Through Rate (CTR) prediction is a fundamental technique for online advertising recommendation and the complex online competitive auction process also brings many difficulties to CTR optimization. Recent studies have shown that introducing posterior auction information contributes to the performance of CTR prediction. However, existing work doesn't fully capitalize on the benefits of auction information and overlooks the data bias brought by the auction, leading to biased and suboptimal results. To address these limitations, we propose Auction Information Enhanced Framework (AIE) for CTR prediction in online advertising, which delves into the problem of insufficient utilization of auction signals and first reveals the auction bias. Specifically, AIE introduces two pluggable modules, namely Adaptive Market-price Auxiliary Module (AM2) and Bid Calibration Module (BCM), which work collaboratively to excavate the posterior auction signals better and enhance the performance of CTR prediction. Furthermore, the two proposed modules are lightweight, model-agnostic, and friendly to inference latency. Extensive experiments are conducted on a public dataset and an industrial dataset to demonstrate the effectiveness and compatibility of AIE. Besides, a one-month online A/B test in a large-scale advertising platform shows that AIE improves the base model by 5.76% and 2.44% in terms of eCPM and CTR, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07907v1-abstract-full').style.display = 'none'; document.getElementById('2408.07907v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.07795">arXiv:2408.07795</a> <span> [<a href="https://arxiv.org/pdf/2408.07795">pdf</a>, <a href="https://arxiv.org/ps/2408.07795">ps</a>, <a href="https://arxiv.org/format/2408.07795">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Exoskeleton-Assisted Balance and Task Evaluation During Quiet Stance and Kneeling in Construction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sreenivasan%2C+G">Gayatri Sreenivasan</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chunchu Zhu</a>, <a href="/search/cs?searchtype=author&query=Yi%2C+J">Jingang Yi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.07795v1-abstract-short" style="display: inline;"> Construction workers exert intense physical effort and experience serious safety and health risks in hazardous working environments. Quiet stance and kneeling are among the most common postures performed by construction workers during their daily work. This paper analyzes lower-limb joint influence on neural balance control strategies using the frequency behavior of the intersection point of groun… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07795v1-abstract-full').style.display = 'inline'; document.getElementById('2408.07795v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.07795v1-abstract-full" style="display: none;"> Construction workers exert intense physical effort and experience serious safety and health risks in hazardous working environments. Quiet stance and kneeling are among the most common postures performed by construction workers during their daily work. This paper analyzes lower-limb joint influence on neural balance control strategies using the frequency behavior of the intersection point of ground reaction forces. To evaluate the impact of elevation and wearable knee exoskeletons on postural balance and welding task performance, we design and integrate virtual- and mixed-reality (VR/MR) to simulate elevated environments and welding tasks. A linear quadratic regulator-controlled triple- and double-link inverted pendulum model is used for balance strategy quantification in quiet stance and kneeling, respectively. Extensive multi-subject experiments are conducted to evaluate the usability of occupational exoskeletons in destabilizing construction environments. The quantified balance strategies capture the significance of knee joint during balance control of quiet stance and kneeling gaits. Results show that center of pressure sway area reduced up to 62% in quiet stance and 39% in kneeling for subjects tested in high-elevation VR/MR worksites when provided knee exoskeleton assistance. The comprehensive balance and multitask evaluation methodology developed aims to reveal exoskeleton design considerations to mitigate the fall risk in construction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07795v1-abstract-full').style.display = 'none'; document.getElementById('2408.07795v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 15 figures, submitted to IEEE Transactions on Automation Science and Engineering</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.04092">arXiv:2408.04092</a> <span> [<a href="https://arxiv.org/pdf/2408.04092">pdf</a>, <a href="https://arxiv.org/format/2408.04092">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> </div> <p class="title is-5 mathjax"> Programmable Dataflows: Abstraction and Programming Model for Data Sharing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Xia%2C+S">Siyuan Xia</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chris Zhu</a>, <a href="/search/cs?searchtype=author&query=Srivastava%2C+T">Tapan Srivastava</a>, <a href="/search/cs?searchtype=author&query=Fahey%2C+B">Bridget Fahey</a>, <a href="/search/cs?searchtype=author&query=Fernandez%2C+R+C">Raul Castro Fernandez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.04092v1-abstract-short" style="display: inline;"> Data sharing is central to a wide variety of applications such as fraud detection, ad matching, and research. The lack of data sharing abstractions makes the solution to each data sharing problem bespoke and cost-intensive, hampering value generation. In this paper, we first introduce a data sharing model to represent every data sharing problem with a sequence of dataflows. From the model, we dist… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.04092v1-abstract-full').style.display = 'inline'; document.getElementById('2408.04092v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.04092v1-abstract-full" style="display: none;"> Data sharing is central to a wide variety of applications such as fraud detection, ad matching, and research. The lack of data sharing abstractions makes the solution to each data sharing problem bespoke and cost-intensive, hampering value generation. In this paper, we first introduce a data sharing model to represent every data sharing problem with a sequence of dataflows. From the model, we distill an abstraction, the contract, which agents use to communicate the intent of a dataflow and evaluate its consequences, before the dataflow takes place. This helps agents move towards a common sharing goal without violating any regulatory and privacy constraints. Then, we design and implement the contract programming model (CPM), which allows agents to program data sharing applications catered to each problem's needs. Contracts permit data sharing, but their interactive nature may introduce inefficiencies. To mitigate those inefficiencies, we extend the CPM so that it can save intermediate outputs of dataflows, and skip computation if a dataflow tries to access data that it does not have access to. In our evaluation, we show that 1) the contract abstraction is general enough to represent a wide range of sharing problems, 2) we can write programs for complex data sharing problems and exhibit qualitative improvements over other alternate technologies, and 3) quantitatively, our optimizations make sharing programs written with the CPM efficient. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.04092v1-abstract-full').style.display = 'none'; document.getElementById('2408.04092v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.03977">arXiv:2408.03977</a> <span> [<a href="https://arxiv.org/pdf/2408.03977">pdf</a>, <a href="https://arxiv.org/format/2408.03977">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Learning from Noisy Labels for Long-tailed Data via Optimal Transport </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+M">Mengting Li</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chuang Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.03977v1-abstract-short" style="display: inline;"> Noisy labels, which are common in real-world datasets, can significantly impair the training of deep learning models. However, recent adversarial noise-combating methods overlook the long-tailed distribution of real data, which can significantly harm the effect of denoising strategies. Meanwhile, the mismanagement of noisy labels further compromises the model's ability to handle long-tailed data.… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.03977v1-abstract-full').style.display = 'inline'; document.getElementById('2408.03977v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.03977v1-abstract-full" style="display: none;"> Noisy labels, which are common in real-world datasets, can significantly impair the training of deep learning models. However, recent adversarial noise-combating methods overlook the long-tailed distribution of real data, which can significantly harm the effect of denoising strategies. Meanwhile, the mismanagement of noisy labels further compromises the model's ability to handle long-tailed data. To tackle this issue, we propose a novel approach to manage data characterized by both long-tailed distributions and noisy labels. First, we introduce a loss-distance cross-selection module, which integrates class predictions and feature distributions to filter clean samples, effectively addressing uncertainties introduced by noisy labels and long-tailed distributions. Subsequently, we employ optimal transport strategies to generate pseudo-labels for the noise set in a semi-supervised training manner, enhancing pseudo-label quality while mitigating the effects of sample scarcity caused by the long-tailed distribution. We conduct experiments on both synthetic and real-world datasets, and the comprehensive experimental results demonstrate that our method surpasses current state-of-the-art methods. Our code will be available in the future. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.03977v1-abstract-full').style.display = 'none'; document.getElementById('2408.03977v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.21740">arXiv:2407.21740</a> <span> [<a href="https://arxiv.org/pdf/2407.21740">pdf</a>, <a href="https://arxiv.org/format/2407.21740">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Contrastive Factor Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Duan%2C+Z">Zhibin Duan</a>, <a href="/search/cs?searchtype=author&query=Wen%2C+T">Tiansheng Wen</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yifei Wang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+B">Bo Chen</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+M">Mingyuan Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.21740v2-abstract-short" style="display: inline;"> Factor analysis, often regarded as a Bayesian variant of matrix factorization, offers superior capabilities in capturing uncertainty, modeling complex dependencies, and ensuring robustness. As the deep learning era arrives, factor analysis is receiving less and less attention due to their limited expressive ability. On the contrary, contrastive learning has emerged as a potent technique with demon… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21740v2-abstract-full').style.display = 'inline'; document.getElementById('2407.21740v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.21740v2-abstract-full" style="display: none;"> Factor analysis, often regarded as a Bayesian variant of matrix factorization, offers superior capabilities in capturing uncertainty, modeling complex dependencies, and ensuring robustness. As the deep learning era arrives, factor analysis is receiving less and less attention due to their limited expressive ability. On the contrary, contrastive learning has emerged as a potent technique with demonstrated efficacy in unsupervised representational learning. While the two methods are different paradigms, recent theoretical analysis has revealed the mathematical equivalence between contrastive learning and matrix factorization, providing a potential possibility for factor analysis combined with contrastive learning. Motivated by the interconnectedness of contrastive learning, matrix factorization, and factor analysis, this paper introduces a novel Contrastive Factor Analysis framework, aiming to leverage factor analysis's advantageous properties within the realm of contrastive learning. To further leverage the interpretability properties of non-negative factor analysis, which can learn disentangled representations, contrastive factor analysis is extended to a non-negative version. Finally, extensive experimental validation showcases the efficacy of the proposed contrastive (non-negative) factor analysis methodology across multiple key properties, including expressiveness, robustness, interpretability, and accurate uncertainty estimation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21740v2-abstract-full').style.display = 'none'; document.getElementById('2407.21740v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.20242">arXiv:2407.20242</a> <span> [<a href="https://arxiv.org/pdf/2407.20242">pdf</a>, <a href="https://arxiv.org/format/2407.20242">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> BadRobot: Manipulating Embodied LLMs in the Physical World </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhang%2C+H">Hangtao Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenyu Zhu</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+X">Xianlong Wang</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+Z">Ziqi Zhou</a>, <a href="/search/cs?searchtype=author&query=Yin%2C+C">Changgan Yin</a>, <a href="/search/cs?searchtype=author&query=Li%2C+M">Minghui Li</a>, <a href="/search/cs?searchtype=author&query=Xue%2C+L">Lulu Xue</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yichen Wang</a>, <a href="/search/cs?searchtype=author&query=Hu%2C+S">Shengshan Hu</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+A">Aishan Liu</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+P">Peijin Guo</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+L+Y">Leo Yu Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.20242v3-abstract-short" style="display: inline;"> Embodied AI represents systems where AI is integrated into physical entities, enabling them to perceive and interact with their surroundings. Large Language Model (LLM), which exhibits powerful language understanding abilities, has been extensively employed in embodied AI by facilitating sophisticated task planning. However, a critical safety issue remains overlooked: could these embodied LLMs per… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.20242v3-abstract-full').style.display = 'inline'; document.getElementById('2407.20242v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.20242v3-abstract-full" style="display: none;"> Embodied AI represents systems where AI is integrated into physical entities, enabling them to perceive and interact with their surroundings. Large Language Model (LLM), which exhibits powerful language understanding abilities, has been extensively employed in embodied AI by facilitating sophisticated task planning. However, a critical safety issue remains overlooked: could these embodied LLMs perpetrate harmful behaviors? In response, we introduce BadRobot, a novel attack paradigm aiming to make embodied LLMs violate safety and ethical constraints through typical voice-based user-system interactions. Specifically, three vulnerabilities are exploited to achieve this type of attack: (i) manipulation of LLMs within robotic systems, (ii) misalignment between linguistic outputs and physical actions, and (iii) unintentional hazardous behaviors caused by world knowledge's flaws. Furthermore, we construct a benchmark of various malicious physical action queries to evaluate BadRobot's attack performance. Based on this benchmark, extensive experiments against existing prominent embodied LLM frameworks (e.g., Voxposer, Code as Policies, and ProgPrompt) demonstrate the effectiveness of our BadRobot. Warning: This paper contains harmful AI-generated language and aggressive actions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.20242v3-abstract-full').style.display = 'none'; document.getElementById('2407.20242v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">38 pages, 16 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.19512">arXiv:2407.19512</a> <span> [<a href="https://arxiv.org/pdf/2407.19512">pdf</a>, <a href="https://arxiv.org/format/2407.19512">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Large-scale cervical precancerous screening via AI-assisted cytology whole slide image analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+H">Honglin Li</a>, <a href="/search/cs?searchtype=author&query=Sun%2C+Y">Yusuan Sun</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenglu Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Y">Yunlong Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+S">Shichuan Zhang</a>, <a href="/search/cs?searchtype=author&query=Shui%2C+Z">Zhongyi Shui</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+P">Pingyi Chen</a>, <a href="/search/cs?searchtype=author&query=Li%2C+J">Jingxiong Li</a>, <a href="/search/cs?searchtype=author&query=Zheng%2C+S">Sunyi Zheng</a>, <a href="/search/cs?searchtype=author&query=Cui%2C+C">Can Cui</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+L">Lin Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.19512v1-abstract-short" style="display: inline;"> Cervical Cancer continues to be the leading gynecological malignancy, posing a persistent threat to women's health on a global scale. Early screening via cytology Whole Slide Image (WSI) diagnosis is critical to prevent this Cancer progression and improve survival rate, but pathologist's single test suffers inevitable false negative due to the immense number of cells that need to be reviewed withi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.19512v1-abstract-full').style.display = 'inline'; document.getElementById('2407.19512v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.19512v1-abstract-full" style="display: none;"> Cervical Cancer continues to be the leading gynecological malignancy, posing a persistent threat to women's health on a global scale. Early screening via cytology Whole Slide Image (WSI) diagnosis is critical to prevent this Cancer progression and improve survival rate, but pathologist's single test suffers inevitable false negative due to the immense number of cells that need to be reviewed within a WSI. Though computer-aided automated diagnostic models can serve as strong complement for pathologists, their effectiveness is hampered by the paucity of extensive and detailed annotations, coupled with the limited interpretability and robustness. These factors significantly hinder their practical applicability and reliability in clinical settings. To tackle these challenges, we develop an AI approach, which is a Scalable Technology for Robust and Interpretable Diagnosis built on Extensive data (STRIDE) of cervical cytology. STRIDE addresses the bottleneck of limited annotations by integrating patient-level labels with a small portion of cell-level labels through an end-to-end training strategy, facilitating scalable learning across extensive datasets. To further improve the robustness to real-world domain shifts of cytology slide-making and imaging, STRIDE employs color adversarial samples training that mimic staining and imaging variations. Lastly, to achieve pathologist-level interpretability for the trustworthiness in clinical settings, STRIDE can generate explanatory textual descriptions that simulates pathologists' diagnostic processes by cell image feature and textual description alignment. Conducting extensive experiments and evaluations in 183 medical centers with a dataset of 341,889 WSIs and 0.1 billion cells from cervical cytology patients, STRIDE has demonstrated a remarkable superiority over previous state-of-the-art techniques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.19512v1-abstract-full').style.display = 'none'; document.getElementById('2407.19512v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.15708">arXiv:2407.15708</a> <span> [<a href="https://arxiv.org/pdf/2407.15708">pdf</a>, <a href="https://arxiv.org/format/2407.15708">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> SwinSF: Image Reconstruction from Spatial-Temporal Spike Streams </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Jiang%2C+L">Liangyan Jiang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chuang Zhu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+Y">Yanxu Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.15708v2-abstract-short" style="display: inline;"> The spike camera, with its high temporal resolution, low latency, and high dynamic range, addresses high-speed imaging challenges like motion blur. It captures photons at each pixel independently, creating binary spike streams rich in temporal information but challenging for image reconstruction. Current algorithms, both traditional and deep learning-based, still need to be improved in the utiliza… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.15708v2-abstract-full').style.display = 'inline'; document.getElementById('2407.15708v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.15708v2-abstract-full" style="display: none;"> The spike camera, with its high temporal resolution, low latency, and high dynamic range, addresses high-speed imaging challenges like motion blur. It captures photons at each pixel independently, creating binary spike streams rich in temporal information but challenging for image reconstruction. Current algorithms, both traditional and deep learning-based, still need to be improved in the utilization of the rich temporal detail and the restoration of the details of the reconstructed image. To overcome this, we introduce Swin Spikeformer (SwinSF), a novel model for dynamic scene reconstruction from spike streams. SwinSF is composed of Spike Feature Extraction, Spatial-Temporal Feature Extraction, and Final Reconstruction Module. It combines shifted window self-attention and proposed temporal spike attention, ensuring a comprehensive feature extraction that encapsulates both spatial and temporal dynamics, leading to a more robust and accurate reconstruction of spike streams. Furthermore, we build a new synthesized dataset for spike image reconstruction which matches the resolution of the latest spike camera, ensuring its relevance and applicability to the latest developments in spike camera imaging. Experimental results demonstrate that the proposed network SwinSF sets a new benchmark, achieving state-of-the-art performance across a series of datasets, including both real-world and synthesized data across various resolutions. Our codes and proposed dataset will be available soon. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.15708v2-abstract-full').style.display = 'none'; document.getElementById('2407.15708v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.15600">arXiv:2407.15600</a> <span> [<a href="https://arxiv.org/pdf/2407.15600">pdf</a>, <a href="https://arxiv.org/format/2407.15600">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A Pairwise Comparison Relation-assisted Multi-objective Evolutionary Neural Architecture Search Method with Multi-population Mechanism </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Xue%2C+Y">Yu Xue</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenchen Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+M">MengChu Zhou</a>, <a href="/search/cs?searchtype=author&query=Wahib%2C+M">Mohamed Wahib</a>, <a href="/search/cs?searchtype=author&query=Gabbouj%2C+M">Moncef Gabbouj</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.15600v1-abstract-short" style="display: inline;"> Neural architecture search (NAS) enables re-searchers to automatically explore vast search spaces and find efficient neural networks. But NAS suffers from a key bottleneck, i.e., numerous architectures need to be evaluated during the search process, which requires a lot of computing resources and time. In order to improve the efficiency of NAS, a series of methods have been proposed to reduce the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.15600v1-abstract-full').style.display = 'inline'; document.getElementById('2407.15600v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.15600v1-abstract-full" style="display: none;"> Neural architecture search (NAS) enables re-searchers to automatically explore vast search spaces and find efficient neural networks. But NAS suffers from a key bottleneck, i.e., numerous architectures need to be evaluated during the search process, which requires a lot of computing resources and time. In order to improve the efficiency of NAS, a series of methods have been proposed to reduce the evaluation time of neural architectures. However, they are not efficient enough and still only focus on the accuracy of architectures. In addition to the classification accuracy, more efficient and smaller network architectures are required in real-world applications. To address the above problems, we propose the SMEM-NAS, a pairwise com-parison relation-assisted multi-objective evolutionary algorithm based on a multi-population mechanism. In the SMEM-NAS, a surrogate model is constructed based on pairwise compari-son relations to predict the accuracy ranking of architectures, rather than the absolute accuracy. Moreover, two populations cooperate with each other in the search process, i.e., a main population guides the evolution, while a vice population expands the diversity. Our method aims to provide high-performance models that take into account multiple optimization objectives. We conduct a series of experiments on the CIFAR-10, CIFAR-100 and ImageNet datasets to verify its effectiveness. With only a single GPU searching for 0.17 days, competitive architectures can be found by SMEM-NAS which achieves 78.91% accuracy with the MAdds of 570M on the ImageNet. This work makes a significant advance in the important field of NAS. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.15600v1-abstract-full').style.display = 'none'; document.getElementById('2407.15600v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.06913">arXiv:2407.06913</a> <span> </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> A Simple, Nearly-Optimal Algorithm for Differentially Private All-Pairs Shortest Distances </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Campbell%2C+J">Jesse Campbell</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chunjiang Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.06913v2-abstract-short" style="display: inline;"> The all-pairs shortest distances (APSD) with differential privacy (DP) problem takes as input an undirected, weighted graph $G = (V,E, \mathbf{w})$ and outputs a private estimate of the shortest distances in $G$ between all pairs of vertices. In this paper, we present a simple $\widetilde{O}(n^{1/3}/\varepsilon)$-accurate algorithm to solve APSD with $\varepsilon$-DP, which reduces to… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.06913v2-abstract-full').style.display = 'inline'; document.getElementById('2407.06913v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.06913v2-abstract-full" style="display: none;"> The all-pairs shortest distances (APSD) with differential privacy (DP) problem takes as input an undirected, weighted graph $G = (V,E, \mathbf{w})$ and outputs a private estimate of the shortest distances in $G$ between all pairs of vertices. In this paper, we present a simple $\widetilde{O}(n^{1/3}/\varepsilon)$-accurate algorithm to solve APSD with $\varepsilon$-DP, which reduces to $\widetilde{O}(n^{1/4}/\varepsilon)$ in the $(\varepsilon, 未)$-DP setting, where $n = |V|$. Our algorithm greatly improves upon the error of prior algorithms, namely $\widetilde{O}(n^{2/3}/\varepsilon)$ and $\widetilde{O}(\sqrt{n}/\varepsilon)$ in the two respective settings, and is the first to be optimal up to a polylogarithmic factor, based on a lower bound of $\widetilde惟(n^{1/4})$. In the case where a multiplicative approximation is allowed, we give two different constructions of algorithms with reduced additive error. Our first construction allows a multiplicative approximation of $O(k\log{\log{n}})$ and has additive error $\widetilde{O}(k\cdot n^{1/k}/\varepsilon)$ in the $\varepsilon$-DP case and $\widetilde{O}(\sqrt{k}\cdot n^{1/(2k)}/\varepsilon)$ in the $(\varepsilon, 未)$-DP case. Our second construction allows multiplicative approximation $2k-1$ and has the same asymptotic additive error as the first construction. Both constructions significantly improve upon the currently best-known additive error of, $\widetilde{O}(k\cdot n^{1/2 + 1/(4k+2)}/\varepsilon)$ and $\widetilde{O}(k\cdot n^{1/3 + 2/(9k+3)}/\varepsilon)$, respectively. Our algorithms are straightforward and work by decomposing a graph into a set of spanning trees, and applying a key observation that we can privately release APSD in trees with $O(\text{polylog}(n))$ error. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.06913v2-abstract-full').style.display = 'none'; document.getElementById('2407.06913v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Error in Section 3: (1) Improper assumption that the topology of the shortest path trees are public. (2) Improper usage of Lemma 2.4. Error in Section 4: Improper assumption that the topology of the shortest path trees are public</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.05603">arXiv:2407.05603</a> <span> [<a href="https://arxiv.org/pdf/2407.05603">pdf</a>, <a href="https://arxiv.org/format/2407.05603">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> WSI-VQA: Interpreting Whole Slide Images by Generative Visual Question Answering </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+P">Pingyi Chen</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Chenglu Zhu</a>, <a href="/search/cs?searchtype=author&query=Zheng%2C+S">Sunyi Zheng</a>, <a href="/search/cs?searchtype=author&query=Li%2C+H">Honglin Li</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+L">Lin Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.05603v1-abstract-short" style="display: inline;"> Whole slide imaging is routinely adopted for carcinoma diagnosis and prognosis. Abundant experience is required for pathologists to achieve accurate and reliable diagnostic results of whole slide images (WSI). The huge size and heterogeneous features of WSIs make the workflow of pathological reading extremely time-consuming. In this paper, we propose a novel framework (WSI-VQA) to interpret WSIs b… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.05603v1-abstract-full').style.display = 'inline'; document.getElementById('2407.05603v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.05603v1-abstract-full" style="display: none;"> Whole slide imaging is routinely adopted for carcinoma diagnosis and prognosis. Abundant experience is required for pathologists to achieve accurate and reliable diagnostic results of whole slide images (WSI). The huge size and heterogeneous features of WSIs make the workflow of pathological reading extremely time-consuming. In this paper, we propose a novel framework (WSI-VQA) to interpret WSIs by generative visual question answering. WSI-VQA shows universality by reframing various kinds of slide-level tasks in a question-answering pattern, in which pathologists can achieve immunohistochemical grading, survival prediction, and tumor subtyping following human-machine interaction. Furthermore, we establish a WSI-VQA dataset which contains 8672 slide-level question-answering pairs with 977 WSIs. Besides the ability to deal with different slide-level tasks, our generative model which is named Wsi2Text Transformer (W2T) outperforms existing discriminative models in medical correctness, which reveals the potential of our model to be applied in the clinical scenario. Additionally, we also visualize the co-attention mapping between word embeddings and WSIs as an intuitive explanation for diagnostic results. The dataset and related code are available at https://github.com/cpystan/WSI-VQA. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.05603v1-abstract-full').style.display = 'none'; document.getElementById('2407.05603v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at ECCV 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.04396">arXiv:2407.04396</a> <span> [<a href="https://arxiv.org/pdf/2407.04396">pdf</a>, <a href="https://arxiv.org/format/2407.04396">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Graph-Guided Test-Time Adaptation for Glaucoma Diagnosis using Fundus Photography </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zeng%2C+Q">Qian Zeng</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+L">Le Zhang</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Y">Yipeng Liu</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+C">Ce Zhu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+F">Fan Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.04396v2-abstract-short" style="display: inline;"> Glaucoma is a leading cause of irreversible blindness worldwide. While deep learning approaches using fundus images have largely improved early diagnosis of glaucoma, variations in images from different devices and locations (known as domain shifts) challenge the use of pre-trained models in real-world settings. To address this, we propose a novel Graph-guided Test-Time Adaptation (GTTA) framework… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.04396v2-abstract-full').style.display = 'inline'; document.getElementById('2407.04396v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.04396v2-abstract-full" style="display: none;"> Glaucoma is a leading cause of irreversible blindness worldwide. While deep learning approaches using fundus images have largely improved early diagnosis of glaucoma, variations in images from different devices and locations (known as domain shifts) challenge the use of pre-trained models in real-world settings. To address this, we propose a novel Graph-guided Test-Time Adaptation (GTTA) framework to generalize glaucoma diagnosis models to unseen test environments. GTTA integrates the topological information of fundus images into the model training, enhancing the model's transferability and reducing the risk of learning spurious correlation. During inference, GTTA introduces a novel test-time training objective to make the source-trained classifier progressively adapt to target patterns with reliable class conditional estimation and consistency regularization. Experiments on cross-domain glaucoma diagnosis benchmarks demonstrate the superiority of the overall framework and individual components under different backbone networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.04396v2-abstract-full').style.display = 'none'; document.getElementById('2407.04396v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 3 figures, 3 tables, submitted to MICCAI</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&query=Zhu%2C+C&start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> <li><span class="pagination-ellipsis">…</span></li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>