Search | arXiv e-print repository

Showing 1–50 of 238 results for author: Deng, W

Searching in archive cs (a search across all archives is also available). Results are sorted by announcement date, newest first, with 50 results per page and abstracts shown; the 238 results span pages 1–5.
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Deng%2C+W&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Deng%2C+W&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Deng%2C+W&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Deng%2C+W&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Deng%2C+W&amp;start=150" class="pagination-link " aria-label="Page 4" aria-current="page">4 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Deng%2C+W&amp;start=200" class="pagination-link " aria-label="Page 5" aria-current="page">5 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16277">arXiv:2411.16277</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16277">pdf</a>, <a href="https://arxiv.org/format/2411.16277">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="General Economics">econ.GN</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Finance">q-fin.CP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> FinML-Chain: A Blockchain-Integrated Dataset for Enhanced Financial Machine Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jingfeng Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wanlin Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+D">Dangxing Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+L">Luyao Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16277v1-abstract-short" style="display: inline;"> Machine learning is critical for innovation and efficiency in financial markets, offering predictive models and data-driven decision-making. 
However, challenges such as missing data, lack of transparency, untimely updates, insecurity, and incompatible data sources limit its effectiveness. Blockchain technology, with its transparency, immutability, and real-time updates, addresses these challenges.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16277v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16277v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16277v1-abstract-full" style="display: none;"> Machine learning is critical for innovation and efficiency in financial markets, offering predictive models and data-driven decision-making. However, challenges such as missing data, lack of transparency, untimely updates, insecurity, and incompatible data sources limit its effectiveness. Blockchain technology, with its transparency, immutability, and real-time updates, addresses these challenges. We present a framework for integrating high-frequency on-chain data with low-frequency off-chain data, providing a benchmark for addressing novel research questions in economic mechanism design. This framework generates modular, extensible datasets for analyzing economic mechanisms such as the Transaction Fee Mechanism, enabling multi-modal insights and fairness-driven evaluations. Using four machine learning techniques, including linear regression, deep neural networks, XGBoost, and LSTM models, we demonstrate the framework&#39;s ability to produce datasets that advance financial research and improve understanding of blockchain-driven systems. Our contributions include: (1) proposing a research scenario for the Transaction Fee Mechanism and demonstrating how the framework addresses previously unexplored questions in economic mechanism design; (2) providing a benchmark for financial machine learning by open-sourcing a sample dataset generated by the framework and the code for the pipeline, enabling continuous dataset expansion; and (3) promoting reproducibility, transparency, and collaboration by fully open-sourcing the framework and its outputs. This initiative supports researchers in extending our work and developing innovative financial machine-learning models, fostering advancements at the intersection of machine learning, blockchain, and economics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16277v1-abstract-full').style.display = 'none'; document.getElementById('2411.16277v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.09289">arXiv:2411.09289</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.09289">pdf</a>, <a href="https://arxiv.org/format/2411.09289">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> StreamAdapter: Efficient Test Time Adaptation from Contextual Streams </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Muhtar%2C+D">Dilxat Muhtar</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+Y">Yelong Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yaming Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xiaodong Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+Y">Yadong Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jianfeng Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhan%2C+Y">Yuefeng Zhan</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+H">Hao Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weiwei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+F">Feng Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xueliang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+J">Jianfeng Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+W">Weizhu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qi Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.09289v1-abstract-short" style="display: inline;"> In-context learning (ICL) allows large language models (LLMs) to adapt to new tasks directly from the given demonstrations without requiring gradient updates. While recent advances have expanded context windows to accommodate more demonstrations, this approach increases inference costs without necessarily improving performance. To mitigate these issues, We propose StreamAdapter, a novel approach t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09289v1-abstract-full').style.display = 'inline'; document.getElementById('2411.09289v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.09289v1-abstract-full" style="display: none;"> In-context learning (ICL) allows large language models (LLMs) to adapt to new tasks directly from the given demonstrations without requiring gradient updates. While recent advances have expanded context windows to accommodate more demonstrations, this approach increases inference costs without necessarily improving performance. To mitigate these issues, We propose StreamAdapter, a novel approach that directly updates model parameters from context at test time, eliminating the need for explicit in-context demonstrations. StreamAdapter employs context mapping and weight absorption mechanisms to dynamically transform ICL demonstrations into parameter updates with minimal additional parameters. 
By reducing reliance on numerous in-context examples, StreamAdapter significantly reduce inference costs and allows for efficient inference with constant time complexity, regardless of demonstration count. Extensive experiments across diverse tasks and model architectures demonstrate that StreamAdapter achieves comparable or superior adaptation capability to ICL while requiring significantly fewer demonstrations. The superior task adaptation and context encoding capabilities of StreamAdapter on both language understanding and generation tasks provides a new perspective for adapting LLMs at test time using context, allowing for more efficient adaptation across scenarios and more cost-effective inference <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.09289v1-abstract-full').style.display = 'none'; document.getElementById('2411.09289v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 Pages, 9 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.01453">arXiv:2411.01453</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.01453">pdf</a>, <a href="https://arxiv.org/format/2411.01453">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation">stat.CO</span> </div> </div> <p class="title is-5 mathjax"> Denoising Fisher Training For Neural Implicit Samplers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Luo%2C+W">Weijian Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wei Deng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.01453v1-abstract-short" style="display: inline;"> Efficient sampling from un-normalized target distributions is pivotal in scientific computing and machine learning. While neural samplers have demonstrated potential with a special emphasis on sampling efficiency, existing neural implicit samplers still have issues such as poor mode covering behavior, unstable training dynamics, and sub-optimal performances. To tackle these issues, in this paper,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01453v1-abstract-full').style.display = 'inline'; document.getElementById('2411.01453v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.01453v1-abstract-full" style="display: none;"> Efficient sampling from un-normalized target distributions is pivotal in scientific computing and machine learning. 
While neural samplers have demonstrated potential with a special emphasis on sampling efficiency, existing neural implicit samplers still have issues such as poor mode covering behavior, unstable training dynamics, and sub-optimal performances. To tackle these issues, in this paper, we introduce Denoising Fisher Training (DFT), a novel training approach for neural implicit samplers with theoretical guarantees. We frame the training problem as an objective of minimizing the Fisher divergence by deriving a tractable yet equivalent loss function, which marks a unique theoretical contribution to assessing the intractable Fisher divergences. DFT is empirically validated across diverse sampling benchmarks, including two-dimensional synthetic distribution, Bayesian logistic regression, and high-dimensional energy-based models (EBMs). Notably, in experiments with high-dimensional EBMs, our best one-step DFT neural sampler achieves results on par with MCMC methods with up to 200 sampling steps, leading to a substantially greater efficiency over 100 times higher. This result not only demonstrates the superior performance of DFT in handling complex high-dimensional sampling but also sheds light on efficient sampling methodologies across broader applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.01453v1-abstract-full').style.display = 'none'; document.getElementById('2411.01453v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.00722">arXiv:2411.00722</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.00722">pdf</a>, <a href="https://arxiv.org/format/2411.00722">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Token-level Proximal Policy Optimization for Query Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ouyang%2C+Y">Yichen Ouyang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Lu Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+F">Fangkai Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+P">Pu Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+C">Chenghua Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jianfeng Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+B">Bochen Pang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yaming Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhan%2C+Y">Yuefeng Zhan</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+H">Hao Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Q">Qingwei Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Rajmohan%2C+S">Saravan Rajmohan</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weiwei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Dongmei Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+F">Feng Sun</a>, <a 
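As background for entry 3 (not taken from the paper itself): the Fisher divergence between the sampler distribution $q$ and the target $p$, which DFT is described as minimizing through a tractable equivalent loss, has the standard form

$$ \mathcal{D}_F(q\,\|\,p) \;=\; \mathbb{E}_{x\sim q}\!\left[\big\|\nabla_x \log q(x)-\nabla_x \log p(x)\big\|_2^2\right], $$

which is intractable to evaluate directly for an implicit sampler because $\nabla_x \log q(x)$ has no closed form; note also that $\nabla_x \log p(x)$ does not depend on the normalizing constant of $p$, which is why this divergence suits un-normalized targets. The paper's equivalent loss is not reproduced here.
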
href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qi Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.00722v1-abstract-short" style="display: inline;"> Query generation is a critical task for web search engines (e.g. Google, Bing) and recommendation systems. Recently, state-of-the-art query generation methods leverage Large Language Models (LLMs) for their strong capabilities in context understanding and text generation. However, they still face challenges in generating high-quality queries in terms of inferring user intent based on their web sea&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00722v1-abstract-full').style.display = 'inline'; document.getElementById('2411.00722v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.00722v1-abstract-full" style="display: none;"> Query generation is a critical task for web search engines (e.g. Google, Bing) and recommendation systems. Recently, state-of-the-art query generation methods leverage Large Language Models (LLMs) for their strong capabilities in context understanding and text generation. However, they still face challenges in generating high-quality queries in terms of inferring user intent based on their web search interaction history. In this paper, we propose Token-level Proximal Policy Optimization (TPPO), a noval approach designed to empower LLMs perform better in query generation through fine-tuning. TPPO is based on the Reinforcement Learning from AI Feedback (RLAIF) paradigm, consisting of a token-level reward model and a token-level proximal policy optimization module to address the sparse reward challenge in traditional RLAIF frameworks. To evaluate the effectiveness and robustness of TPPO, we conducted experiments on both open-source dataset and an industrial dataset that was collected from a globally-used search engine. The experimental results demonstrate that TPPO significantly improves the performance of query generation for LLMs and outperforms its existing competitors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00722v1-abstract-full').style.display = 'none'; document.getElementById('2411.00722v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.22985">arXiv:2410.22985</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.22985">pdf</a>, <a href="https://arxiv.org/ps/2410.22985">ps</a>, <a href="https://arxiv.org/format/2410.22985">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Troubling Taxonomies in GenAI Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Berman%2C+G">Glen Berman</a>, <a href="/search/cs?searchtype=author&amp;query=Cooper%2C+N">Ned Cooper</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W+H">Wesley Hanwen Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Hutchinson%2C+B">Ben Hutchinson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.22985v1-abstract-short" style="display: inline;"> To evaluate the societal impacts of GenAI requires a model of how social harms emerge from interactions between GenAI, people, and societal structures. Yet a model is rarely explicitly defined in societal impact evaluations, or in the taxonomies of societal impacts that support them. In this provocation, we argue that societal impacts should be conceptualised as application- and context-specific,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22985v1-abstract-full').style.display = 'inline'; document.getElementById('2410.22985v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.22985v1-abstract-full" style="display: none;"> To evaluate the societal impacts of GenAI requires a model of how social harms emerge from interactions between GenAI, people, and societal structures. Yet a model is rarely explicitly defined in societal impact evaluations, or in the taxonomies of societal impacts that support them. In this provocation, we argue that societal impacts should be conceptualised as application- and context-specific, incommensurable, and shaped by questions of social power. Doing so leads us to conclude that societal impact evaluations using existing taxonomies are inherently limited, in terms of their potential to reveal how GenAI systems may interact with people when introduced into specific social contexts. We therefore propose a governance-first approach to managing societal harms attended by GenAI technologies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.22985v1-abstract-full').style.display = 'none'; document.getElementById('2410.22985v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">3 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.18125">arXiv:2410.18125</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.18125">pdf</a>, <a href="https://arxiv.org/format/2410.18125">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Towards Edge General Intelligence via Large Language Models: Opportunities and Challenges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Handi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weipeng Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+S">Shuo Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+J">Jinfeng Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Z">Zhihan Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Ngai%2C+E+C+H">Edith C. H. Ngai</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jiangchuan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xue Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.18125v1-abstract-short" style="display: inline;"> Edge Intelligence (EI) has been instrumental in delivering real-time, localized services by leveraging the computational capabilities of edge networks. The integration of Large Language Models (LLMs) empowers EI to evolve into the next stage: Edge General Intelligence (EGI), enabling more adaptive and versatile applications that require advanced understanding and reasoning capabilities. However, s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18125v1-abstract-full').style.display = 'inline'; document.getElementById('2410.18125v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.18125v1-abstract-full" style="display: none;"> Edge Intelligence (EI) has been instrumental in delivering real-time, localized services by leveraging the computational capabilities of edge networks. The integration of Large Language Models (LLMs) empowers EI to evolve into the next stage: Edge General Intelligence (EGI), enabling more adaptive and versatile applications that require advanced understanding and reasoning capabilities. However, systematic exploration in this area remains insufficient. This survey delineates the distinctions between EGI and traditional EI, categorizing LLM-empowered EGI into three conceptual systems: centralized, hybrid, and decentralized. For each system, we detail the framework designs and review existing implementations. Furthermore, we evaluate the performance and throughput of various Small Language Models (SLMs) that are more suitable for development on edge devices. 
This survey provides researchers with a comprehensive vision of EGI, offering insights into its vast potential and establishing a foundation for future advancements in this rapidly evolving field. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.18125v1-abstract-full').style.display = 'none'; document.getElementById('2410.18125v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.17073">arXiv:2410.17073</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.17073">pdf</a>, <a href="https://arxiv.org/format/2410.17073">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> Personalized Playback Technology: How Short Video Services Create Excellent User Experience </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weihui Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+Z">Zhiwei Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Fu%2C+D">Deliang Fu</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+Y">Yun Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+S">Shenglan Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xiaocheng Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zheng Li</a>, <a href="/search/cs?searchtype=author&amp;query=Liao%2C+Y">Yiting Liao</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">He Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+C">Chunyu Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+B">Bin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+Z">Zhengyu Xiong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.17073v2-abstract-short" style="display: inline;"> Short-form video content has become increasingly popular and influential in recent years. Its concise yet engaging format aligns well with todays&#39; fast-paced and on-the-go lifestyles, making it a dominating trend in the digital world. As one of the front runners in the short video platform space, ByteDance has been highly successful in delivering a one-of-a-kind short video experience and attracti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.17073v2-abstract-full').style.display = 'inline'; document.getElementById('2410.17073v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.17073v2-abstract-full" style="display: none;"> Short-form video content has become increasingly popular and influential in recent years. Its concise yet engaging format aligns well with todays&#39; fast-paced and on-the-go lifestyles, making it a dominating trend in the digital world. 
7. arXiv:2410.17073 [cs.MM]
Personalized Playback Technology: How Short Video Services Create Excellent User Experience
Authors: Weihui Deng, Zhiwei Fan, Deliang Fu, Yun Gong, Shenglan Huang, Xiaocheng Li, Zheng Li, Yiting Liao, He Liu, Chunyu Qiao, Bin Wang, Zhen Wang, Zhengyu Xiong
Abstract: Short-form video content has become increasingly popular and influential in recent years. Its concise yet engaging format aligns well with today's fast-paced and on-the-go lifestyles, making it a dominating trend in the digital world. As one of the front runners in the short video platform space, ByteDance has been highly successful in delivering a one-of-a-kind short video experience and attracting billions of users worldwide. One key contributing factor is its advanced end-to-end personalized short video playback technology, where we pioneered and developed the new technical field over the past five years to optimize user experience. This paper introduces the major concepts and methodologies of this personalized video playback technology that distinguish it from traditional multimedia technologies. More details, including goal setting, iterative process, modeling, experimental methods and required supporting systems, are also provided to encourage deeper research in this area.
Submitted 15 November, 2024; v1 submitted 22 October, 2024; originally announced October 2024.

8. arXiv:2410.14584 [cs.AI]
MCSFF: Multi-modal Consistency and Specificity Fusion Framework for Entity Alignment
Authors: Wei Ai, Wen Deng, Hongyi Chen, Jiayi Du, Tao Meng, Yuntao Shou
Abstract: Multi-modal entity alignment (MMEA) is essential for enhancing knowledge graphs and improving information retrieval and question-answering systems. Existing methods often focus on integrating modalities through their complementarity but overlook the specificity of each modality, which can obscure crucial features and reduce alignment accuracy. To solve this, we propose the Multi-modal Consistency and Specificity Fusion Framework (MCSFF), which innovatively integrates both complementary and specific aspects of modalities. We utilize Scale Computing's hyper-converged infrastructure to optimize IT management and resource allocation in large-scale data processing. Our framework first computes similarity matrices for each modality using modality embeddings to preserve their unique characteristics. Then, an iterative update method denoises and enhances modality features to fully express critical information. Finally, we integrate the updated information from all modalities to create enriched and precise entity representations. Experiments show our method outperforms current state-of-the-art MMEA baselines on the MMKG dataset, demonstrating its effectiveness and practical potential.
Submitted 18 October, 2024; originally announced October 2024.
Comments: 6 pages, 1 figure.

9. arXiv:2410.14332 [cs.CV]
Croc: Pretraining Large Multimodal Models with Cross-Modal Comprehension
Authors: Yin Xie, Kaicheng Yang, Ninghua Yang, Weimo Deng, Xiangzi Dai, Tiancheng Gu, Yumeng Wang, Xiang An, Yongle Zhao, Ziyong Feng, Jiankang Deng
Abstract: Recent advances in Large Language Models (LLMs) have catalyzed the development of Large Multimodal Models (LMMs). However, existing research primarily focuses on tuning language and image instructions, ignoring the critical pretraining phase where models learn to process textual and visual modalities jointly. In this paper, we propose a new pretraining paradigm for LMMs to enhance the visual comprehension capabilities of LLMs by introducing a novel cross-modal comprehension stage. Specifically, we design a dynamically learnable prompt token pool and employ the Hungarian algorithm to replace part of the original visual tokens with the most relevant prompt tokens. Then, we conceptualize visual tokens as analogous to a "foreign language" for the LLMs and propose a mixed attention mechanism with bidirectional visual attention and unidirectional textual attention to comprehensively enhance the understanding of visual tokens. Meanwhile, we integrate a detailed caption generation task, leveraging rich descriptions to further facilitate LLMs in understanding visual semantic information. After pretraining on 1.5 million publicly accessible data, we present a new foundation model called Croc. Experimental results demonstrate that Croc achieves new state-of-the-art performance on massive vision-language benchmarks. To support reproducibility and facilitate further research, we release the training code and pre-trained model weights at https://github.com/deepglint/Croc.
Submitted 18 October, 2024; originally announced October 2024.
Comments: 18 pages, 11 figures.
10. arXiv:2410.12496 [cs.DB, cs.PL, cs.SE] DOI: 10.1145/3698810
Finding Logic Bugs in Spatial Database Engines via Affine Equivalent Inputs
Authors: Wenjing Deng, Qiuyang Mang, Chengyu Zhang, Manuel Rigger
Abstract: Spatial Database Management Systems (SDBMSs) aim to store, manipulate, and retrieve spatial data. SDBMSs are employed in various modern applications, such as geographic information systems, computer-aided design tools, and location-based services. However, the presence of logic bugs in SDBMSs can lead to incorrect results, substantially undermining the reliability of these applications. Detecting logic bugs in SDBMSs is challenging due to the lack of ground truth for identifying incorrect results. In this paper, we propose an automated geometry-aware generator to generate high-quality SQL statements for SDBMSs and a novel concept named Affine Equivalent Inputs (AEI) to validate the results of SDBMSs. We implemented them as a tool named Spatter (Spatial DBMSs Tester) for finding logic bugs in four popular SDBMSs: PostGIS, DuckDB Spatial, MySQL, and SQL Server. Our testing campaign detected 34 previously unknown and unique bugs in these SDBMSs, of which 30 have been confirmed and 18 have already been fixed. Our testing efforts have been well appreciated by the developers. Experimental results demonstrate that the geometry-aware generator significantly outperforms a naive random-shape generator in detecting unique bugs, and AEI can identify 14 logic bugs in SDBMSs that were overlooked by previous methodologies.
Submitted 17 October, 2024; v1 submitted 16 October, 2024; originally announced October 2024.
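A toy illustration of the Affine Equivalent Inputs oracle from entry 10: topological predicates such as containment are preserved by invertible affine maps, so evaluating the same predicate on affinely transformed geometries and comparing outcomes can expose logic bugs without ground truth. Shapely is used below only as a stand-in geometry library, and the specific matrix and shapes are arbitrary choices; Spatter itself targets PostGIS, DuckDB Spatial, MySQL, and SQL Server.

    # Toy AEI-style check (entry 10): a containment query must give the same
    # answer before and after an invertible affine transformation.
    # Shapely stands in for the spatial engine under test; not the Spatter tool.
    from shapely.geometry import Point, Polygon
    from shapely.affinity import affine_transform

    M = [2.0, 1.0, 0.5, 3.0, 10.0, -4.0]   # [a, b, d, e, xoff, yoff]; det = 5.5, invertible

    poly = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
    pt = Point(1, 1)

    before = poly.contains(pt)
    after = affine_transform(poly, M).contains(affine_transform(pt, M))

    assert before == after, "potential logic bug: containment not affine-invariant"
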
Aegis is specifically designed to support complex functional safety tasks within the automotive sector. It is tailored to perform Hazard Analysis and Risk Assessment(HARA), document Functional Safety Requirements(FSR), and plan test cases for Automatic Emergency Braking(AEB) systems. The most advanced version, Aegis-Max, leverages Retrieval-Augmented Generation(RAG) and reflective mechanisms to enhance its capability in managing complex, knowledge-intensive tasks. Additionally, targeted prompt refinement by professional functional safety practitioners can significantly optimize Aegis&#39;s performance in the functional safety domain. This paper demonstrates the potential of Aegis to improve the efficiency and effectiveness of functional safety processes in automotive engineering. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12475v2-abstract-full').style.display = 'none'; document.getElementById('2410.12475v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.09344">arXiv:2410.09344</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.09344">pdf</a>, <a href="https://arxiv.org/format/2410.09344">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> DARE the Extreme: Revisiting Delta-Parameter Pruning For Fine-Tuned Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wenlong Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yize Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Vakilian%2C+V">Vala Vakilian</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+M">Minghui Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xiaoxiao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Thrampoulidis%2C+C">Christos Thrampoulidis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.09344v1-abstract-short" style="display: inline;"> Storing open-source fine-tuned models separately introduces redundancy and increases response times in applications utilizing multiple models. 
Delta-parameter pruning (DPP), particularly the random drop and rescale (DARE) method proposed by Yu et al., addresses this by pruning the majority of delta parameters--the differences between fine-tuned and pre-trained model weights--while typically maintaining minimal performance loss. However, DARE fails when either the pruning rate or the magnitude of the delta parameters is large. We highlight two key reasons for this failure: (1) an excessively large rescaling factor as pruning rates increase, and (2) high mean and variance in the delta parameters. To push DARE's limits, we introduce DAREx (DARE the eXtreme), which features two algorithmic improvements: (1) DAREx-q, a rescaling factor modification that significantly boosts performance at high pruning rates (e.g., >30% on COLA and SST2 for encoder models, with even greater gains in decoder models), and (2) DAREx-L2, which combines DARE with AdamR, an in-training method that applies appropriate delta regularization before DPP. We also demonstrate that DAREx-q can be seamlessly combined with vanilla parameter-efficient fine-tuning techniques like LoRA and can facilitate structural DPP. Additionally, we revisit the application of importance-based pruning techniques within DPP, demonstrating that they outperform random-based methods when delta parameters are large. Through this comprehensive study, we develop a pipeline for selecting the most appropriate DPP method under various practical scenarios.
Submitted 11 October, 2024; originally announced October 2024.
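
The drop-and-rescale step at the core of DARE is simple enough to sketch. The following is a minimal PyTorch illustration, not the authors' code: each delta parameter (fine-tuned weight minus pre-trained weight) is dropped with probability p and the survivors are rescaled by 1/(1-p) so the delta is preserved in expectation. DAREx-q, as described above, modifies exactly this rescaling factor.

```python
import torch

def dare_merge(pretrained: dict, finetuned: dict, p: float = 0.9) -> dict:
    """Illustrative random drop-and-rescale (DARE) over delta parameters.

    pretrained / finetuned map parameter names to tensors of equal shape;
    p is the fraction of delta parameters to drop (assumed < 1).
    """
    merged = {}
    for name, w0 in pretrained.items():
        delta = finetuned[name] - w0                   # delta parameters
        keep = (torch.rand_like(delta) >= p).float()   # Bernoulli keep mask
        delta = delta * keep / (1.0 - p)               # rescale survivors by 1/(1-p)
        merged[name] = w0 + delta                      # re-attach pruned delta
    return merged
```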

arXiv:2410.09132 [pdf, other] (cs.LG, cs.AI, cs.CV) - https://arxiv.org/abs/2410.09132
When Graph meets Multimodal: Benchmarking on Multimodal Attributed Graphs Learning
Authors: Hao Yan, Chaozhuo Li, Zhigang Yu, Jun Yin, Ruochen Liu, Peiyan Zhang, Weihao Han, Mingzheng Li, Zhengxin Zeng, Hao Sun, Weiwei Deng, Feng Sun, Qi Zhang, Senzhang Wang
Abstract: Multimodal attributed graphs (MAGs) are prevalent in various real-world scenarios and generally contain two kinds of knowledge: (a) attribute knowledge, which is mainly supported by the attributes of different modalities contained in the nodes (entities) themselves, such as texts and images, and (b) topology knowledge, which is provided by the complex interactions between nodes. The cornerstone of MAG representation learning lies in the seamless integration of multimodal attributes and topology.
Recent advancements in Pre-trained Language/Vision models (PLMs/PVMs) and Graph Neural Networks (GNNs) have facilitated effective learning on MAGs, garnering increased research interest. However, the absence of meaningful benchmark datasets and standardized evaluation procedures for MAG representation learning has impeded progress in this field. In this paper, we propose the Multimodal Attribute Graph Benchmark (MAGB), a comprehensive and diverse collection of challenging benchmark datasets for MAGs. The MAGB datasets are notably large in scale and encompass a wide range of domains, spanning from e-commerce networks to social networks. In addition to the brand-new datasets, we conduct extensive benchmark experiments over MAGB with various learning paradigms, ranging from GNN-based to PLM-based methods, to explore the necessity and feasibility of integrating multimodal attributes and graph topology. In a nutshell, we provide an overview of the MAG datasets, standardized evaluation procedures, and baseline experiments. The entire MAGB project is publicly accessible at https://github.com/sktsherlock/ATG.
Submitted 11 October, 2024; originally announced October 2024.

arXiv:2410.08901 [pdf, other] (cs.RO) - https://arxiv.org/abs/2410.08901
SegGrasp: Zero-Shot Task-Oriented Grasping via Semantic and Geometric Guided Segmentation
Authors: Haosheng Li, Weixin Mao, Weipeng Deng, Chenyu Meng, Rui Zhang, Fan Jia, Tiancai Wang, Haoqiang Fan, Hongan Wang, Xiaoming Deng
Abstract: Task-oriented grasping, which involves grasping specific parts of objects based on their functions, is crucial for developing advanced robotic systems capable of performing complex tasks in dynamic environments.
In this paper, we propose a training-free framework that incorporates both semantic and geometric priors for zero-shot task-oriented grasp generation. The proposed framework, SegGrasp, first leverages vision-language models like GLIP for coarse segmentation. It then uses detailed geometric information from convex decomposition to improve segmentation quality through a fusion policy named GeoFusion. An effective grasp pose can then be generated by a grasping network operating on the improved segmentation. We conducted experiments on both a segmentation benchmark and real-world robot grasping. The experimental results show that SegGrasp surpasses the baseline by more than 15% in grasp and segmentation performance.
Submitted 14 October, 2024; v1 submitted 11 October, 2024; originally announced October 2024.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7pages,6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.06514">arXiv:2410.06514</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.06514">pdf</a>, <a href="https://arxiv.org/format/2410.06514">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> MORSE: An Efficient Homomorphic Secret Sharing Scheme Enabling Non-Linear Operation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weiquan Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+B">Bowen Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+Y">Yang Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhong%2C+Y">Yantao Zhong</a>, <a href="/search/cs?searchtype=author&amp;query=Pei%2C+Q">Qingqi Pei</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Ximeng Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.06514v1-abstract-short" style="display: inline;"> Homomorphic secret sharing (HSS) enables two servers to locally perform functions on encrypted data directly and obtain the results in the form of shares. A Paillier-based HSS solution seamlessly achieves multiplicative homomorphism and consumes less communication costs. Unfortunately, existing Paillier-based HSS schemes suffer from a large private key size, potential calculation error, expensive&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06514v1-abstract-full').style.display = 'inline'; document.getElementById('2410.06514v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.06514v1-abstract-full" style="display: none;"> Homomorphic secret sharing (HSS) enables two servers to locally perform functions on encrypted data directly and obtain the results in the form of shares. A Paillier-based HSS solution seamlessly achieves multiplicative homomorphism and consumes less communication costs. Unfortunately, existing Paillier-based HSS schemes suffer from a large private key size, potential calculation error, expensive computation and storage overhead, and only valid on linear operations (e.g., addition and multiplication). To this end, inspired by the Paillier cryptosystem with fast encryption and decryption, we propose MORSE, an efficient homomorphic secret sharing scheme enabling non-linear operation, which enjoys a small key size, no calculation error and low overhead. In terms of functions, MORSE supports addition, subtraction, multiplication, scalar-multiplication, and comparison. Particularly, we carefully design two conversion protocols achieving the mutual conversion between one Paillier ciphertext and two secret shares, which allows MORSE to continuously perform the above operations. Rigorous analyses demonstrate that MORSE securely outputs correct results. 

arXiv:2410.01534 [pdf, other] (cs.CV) - https://arxiv.org/abs/2410.01534
Toward a Holistic Evaluation of Robustness in CLIP Models
Authors: Weijie Tu, Weijian Deng, Tom Gedeon
Abstract: Contrastive Language-Image Pre-training (CLIP) models have shown significant potential, particularly in zero-shot classification across diverse distribution shifts. Building on existing evaluations of overall classification robustness, this work aims to provide a more comprehensive assessment of CLIP by introducing several new perspectives. First, we investigate their robustness to variations in specific visual factors. Second, we assess two critical safety objectives--confidence uncertainty and out-of-distribution detection--beyond mere classification accuracy. Third, we evaluate the finesse with which CLIP models bridge the image and text modalities. Fourth, we extend our examination to 3D awareness in CLIP models, moving beyond traditional 2D image understanding. Finally, we explore the interaction between vision and language encoders within modern large multimodal models (LMMs) that utilize CLIP as the visual backbone, focusing on how this interaction impacts classification robustness.
In each aspect, we consider the impact of six factors on CLIP models: model architecture, training distribution, training set size, fine-tuning, contrastive loss, and test-time prompts. Our study uncovers several previously unknown insights into CLIP. For instance, the architecture of the visual encoder plays a significant role in robustness against 3D corruption. CLIP models tend to exhibit a bias towards shape when making predictions, and this bias tends to diminish after fine-tuning on ImageNet. Vision-language models like LLaVA that leverage the CLIP vision encoder could exhibit benefits in classification performance for challenging categories over CLIP alone. Our findings are poised to offer valuable guidance for enhancing the robustness and reliability of CLIP models.
Submitted 2 October, 2024; originally announced October 2024.
Comments: 17 pages, 10 figures; extension of the NeurIPS'23 work "A Closer Look at the Robustness of Contrastive Language-Image Pre-Training (CLIP)". arXiv admin note: text overlap with arXiv:2402.07410
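
The zero-shot classification setting this evaluation revolves around reduces to comparing an image embedding with embeddings of prompted class names. A minimal sketch using the openai/CLIP package follows; the label set, prompt template, and image path are placeholders, and this is not the authors' evaluation harness.

```python
import torch
import clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

classes = ["dog", "cat", "car"]  # placeholder label set
texts = clip.tokenize([f"a photo of a {c}" for c in classes]).to(device)
image = preprocess(Image.open("example.jpg")).unsqueeze(0).to(device)

with torch.no_grad():
    img_feat = model.encode_image(image)
    txt_feat = model.encode_text(texts)
    img_feat /= img_feat.norm(dim=-1, keepdim=True)  # move to cosine-similarity space
    txt_feat /= txt_feat.norm(dim=-1, keepdim=True)
    probs = (100.0 * img_feat @ txt_feat.T).softmax(dim=-1)

print(dict(zip(classes, probs[0].tolist())))
```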

arXiv:2409.15893 [pdf, other] (cs.CV) - https://arxiv.org/abs/2409.15893
Unsupervised Attention Regularization Based Domain Adaptation for Oracle Character Recognition
Authors: Mei Wang, Weihong Deng, Jiani Hu, Sen Su
Abstract: The study of oracle characters plays an important role in Chinese archaeology and philology. However, the difficulty of collecting and annotating real-world scanned oracle characters hinders the development of oracle character recognition. In this paper, we develop a novel unsupervised domain adaptation (UDA) method, the unsupervised attention regularization network (UARN), to transfer recognition knowledge from labeled handprinted oracle characters to unlabeled scanned data. First, we experimentally show that existing UDA methods are not always consistent with human priors and cannot achieve optimal performance on the target domain. For these oracle characters, which are flip-insensitive and exhibit high inter-class similarity, model interpretations are neither flip-consistent nor class-separable. To tackle this challenge, we take visual perceptual plausibility into consideration when adapting. Specifically, our method enforces attention consistency between the original and flipped images to make the model robust to flipping. Simultaneously, we constrain attention separability between the pseudo class and the most confusing class to improve model discriminability. Extensive experiments demonstrate that UARN shows better interpretability and achieves state-of-the-art performance on the Oracle-241 dataset, substantially outperforming the previous structure-texture separation network by 8.5%.
Submitted 24 September, 2024; originally announced September 2024.
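
The flip-consistency idea described above can be expressed as a small regularizer: the attention map of a horizontally flipped image, flipped back, should match the attention map of the original. A rough PyTorch sketch, where get_attention is a placeholder for however the backbone exposes spatial attention, not the authors' implementation:

```python
import torch
import torch.nn.functional as F

def flip_consistency_loss(model, images, get_attention):
    """Encourage spatial attention maps to be consistent under horizontal flips.

    get_attention(model, images) -> (B, H, W) attention maps; it is a placeholder
    for however the chosen backbone exposes its attention.
    """
    att = get_attention(model, images)                               # A(x)
    att_flip = get_attention(model, torch.flip(images, dims=[-1]))   # A(flip(x))
    att_flip_back = torch.flip(att_flip, dims=[-1])                  # align with A(x)
    return F.mse_loss(att, att_flip_back)
```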

arXiv:2409.11684 [pdf, other] (cs.LG, stat.ML) - https://arxiv.org/abs/2409.11684
Recurrent Interpolants for Probabilistic Time Series Prediction
Authors: Yu Chen, Marin Biloš, Sarthak Mittal, Wei Deng, Kashif Rasul, Anderson Schneider
Abstract: Sequential models like recurrent neural networks and transformers have become standard for probabilistic multivariate time series forecasting across various domains. Despite their strengths, they struggle with capturing high-dimensional distributions and cross-feature dependencies. Recent work explores generative approaches using diffusion or flow-based models, extending to time series imputation and forecasting. However, scalability remains a challenge. This work proposes a novel method combining recurrent neural networks' efficiency with diffusion models' probabilistic modeling, based on stochastic interpolants and conditional generation with control features, offering insights for future developments in this dynamic field.
Submitted 4 October, 2024; v1 submitted 17 September, 2024; originally announced September 2024.

arXiv:2409.05286 [pdf, other] (cs.CL, cs.AI) - https://arxiv.org/abs/2409.05286
Seek and Solve Reasoning for Table Question Answering
Authors: Ruya Jiang, Chun Wang, Weihong Deng
Abstract: Table-based Question Answering (TQA) involves answering questions based on tabular data. The complexity of table structures and question logic makes this task difficult even for Large Language Models (LLMs). This paper improves TQA performance by leveraging LLMs' reasoning capabilities. Inspired by how humans solve TQA tasks, we propose a Seek-and-Solve pipeline that instructs the LLM to first seek relevant information and then answer questions. The two stages are integrated at the reasoning level, and their Chain of Thought (CoT) paths are combined into a coherent Seek-and-Solve CoT (SS-CoT). Furthermore, we present a compact single-stage TQA-solving prompt distilled from the pipeline. Experiments demonstrate that under in-context learning settings, using samples with SS-CoT paths as demonstrations, the TQA-solving prompt can effectively guide the LLM to solve complex TQA tasks, resulting in improved performance and reliability. Our results highlight the importance of properly eliciting LLMs' reasoning capabilities in solving complex TQA tasks.
Submitted 8 September, 2024; originally announced September 2024.
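
The two-stage prompting pattern is easy to sketch: one call asks the model to locate the relevant table cells, a second call answers conditioned on that evidence. The snippet below only illustrates the pattern; call_llm is a hypothetical wrapper around whatever chat-completion API is in use, and the prompts are not the ones from the paper.

```python
def call_llm(prompt: str) -> str:
    """Hypothetical wrapper around a chat-completion API; replace as needed."""
    raise NotImplementedError

def seek_and_solve(table_text: str, question: str) -> str:
    # Stage 1 (seek): ask the model which parts of the table matter.
    seek_prompt = (
        "Here is a table:\n" + table_text +
        "\nQuestion: " + question +
        "\nList the rows and columns relevant to answering the question, "
        "and briefly explain why."
    )
    evidence = call_llm(seek_prompt)

    # Stage 2 (solve): answer the question conditioned on the seek step.
    solve_prompt = (
        "Table:\n" + table_text +
        "\nRelevant information:\n" + evidence +
        "\nUsing only this information, answer the question step by step: " +
        question
    )
    return call_llm(solve_prompt)
```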

arXiv:2408.11554 [pdf, other] (cs.CL, cs.AI) - https://arxiv.org/abs/2408.11554
Differentiating Choices via Commonality for Multiple-Choice Question Answering
Authors: Wenqing Deng, Zhe Wang, Kewen Wang, Shirui Pan, Xiaowang Zhang, Zhiyong Feng
Abstract: Multiple-choice question answering (MCQA) becomes particularly challenging when all choices are relevant to the question and are semantically similar. Yet this setting can potentially provide valuable clues for choosing the right answer. Existing models often rank each choice separately, overlooking the context provided by other choices. Specifically, they fail to leverage the semantic commonalities and nuances among the choices for reasoning. In this paper, we propose DCQA, a novel MCQA model that differentiates choices by identifying and eliminating their commonality. Our model captures the token-level attention of each choice to the question and separates the question tokens attended to by all choices (the commonalities) from those attended to by individual choices (the nuances). Using the nuances as refined contexts for the choices, our model can effectively differentiate choices with subtle differences and provide justifications for choosing the correct answer. We conduct comprehensive experiments across five commonly used MCQA benchmarks, demonstrating that DCQA consistently outperforms baseline models. Furthermore, our case study illustrates the effectiveness of the approach in directing the model's attention to more differentiating features.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11554v1-abstract-full').style.display = 'none'; document.getElementById('2408.11554v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, accepted to ECAI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.10614">arXiv:2408.10614</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.10614">pdf</a>, <a href="https://arxiv.org/format/2408.10614">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Generalizable Facial Expression Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yuhang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+X">Xiuqi Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Liang%2C+C">Chenyi Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+J">Jiani Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weihong Deng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.10614v1-abstract-short" style="display: inline;"> SOTA facial expression recognition (FER) methods fail on test sets that have domain gaps with the train set. Recent domain adaptation FER methods need to acquire labeled or unlabeled samples of target domains to fine-tune the FER model, which might be infeasible in real-world deployment. In this paper, we aim to improve the zero-shot generalization ability of FER methods on different unseen test s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.10614v1-abstract-full').style.display = 'inline'; document.getElementById('2408.10614v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.10614v1-abstract-full" style="display: none;"> SOTA facial expression recognition (FER) methods fail on test sets that have domain gaps with the train set. Recent domain adaptation FER methods need to acquire labeled or unlabeled samples of target domains to fine-tune the FER model, which might be infeasible in real-world deployment. In this paper, we aim to improve the zero-shot generalization ability of FER methods on different unseen test sets using only one train set. Inspired by how humans first detect faces and then select expression features, we propose a novel FER pipeline to extract expression-related features from any given face images. Our method is based on the generalizable face features extracted by large models like CLIP. However, it is non-trivial to adapt the general features of CLIP for specific tasks like FER. 
To preserve the generalization ability of CLIP and the high precision of the FER model, we design a novel approach that learns sigmoid masks on the fixed CLIP face features to extract expression features. To further improve the generalization ability on unseen test sets, we separate the channels of the learned masked features according to the expression classes to directly generate logits, avoiding an FC layer and thereby reducing overfitting. We also introduce a channel-diverse loss to keep the learned masks separated. Extensive experiments on five different FER datasets verify that our method outperforms state-of-the-art FER methods by large margins. Code is available at https://github.com/zyh-uaiaaaa/Generalizable-FER.
Submitted 20 August, 2024; originally announced August 2024.
Comments: Accepted by ECCV 2024
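
The masking-and-channel-separation head described in the abstract can be sketched compactly: a learnable mask is passed through a sigmoid, applied to frozen CLIP features, and the masked channels are grouped per expression class and averaged into logits, with no FC layer. This is a rough reconstruction from the abstract rather than the released code, and the feature dimension and class count are illustrative.

```python
import torch
import torch.nn as nn

class SigmoidMaskHead(nn.Module):
    """Illustrative head: sigmoid mask over frozen CLIP features, channel groups as logits."""

    def __init__(self, feat_dim: int = 512, num_classes: int = 7):
        super().__init__()
        self.num_classes = num_classes
        self.mask_logits = nn.Parameter(torch.zeros(feat_dim))  # learnable mask parameters

    def forward(self, frozen_feats: torch.Tensor) -> torch.Tensor:
        # frozen_feats: (B, feat_dim) from a frozen CLIP image encoder.
        masked = frozen_feats * torch.sigmoid(self.mask_logits)
        group = masked.shape[1] // self.num_classes
        # One channel group per expression class, averaged into a logit (no FC layer).
        logits = masked[:, : group * self.num_classes]
        return logits.view(-1, self.num_classes, group).mean(dim=2)
```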

arXiv:2408.07516 [pdf, other] (cs.CV, eess.IV) - https://arxiv.org/abs/2408.07516
DiffSteISR: Harnessing Diffusion Prior for Superior Real-world Stereo Image Super-Resolution
Authors: Yuanbo Zhou, Xinlin Zhang, Wei Deng, Tao Wang, Tao Tan, Qinquan Gao, Tong Tong
Abstract: We introduce DiffSteISR, a pioneering framework for reconstructing real-world stereo images. DiffSteISR utilizes the powerful prior knowledge embedded in a pre-trained text-to-image model to efficiently recover the lost texture details in low-resolution stereo images. Specifically, DiffSteISR implements a time-aware stereo cross attention with temperature adapter (TASCATA) to guide the diffusion process, ensuring that the generated left and right views exhibit high texture consistency, thereby reducing the disparity error between the super-resolved images and the ground truth (GT) images. Additionally, a stereo omni attention control network (SOA ControlNet) is proposed to enhance the consistency of super-resolved images with GT images in pixel, perceptual, and distribution space. Finally, DiffSteISR incorporates a stereo semantic extractor (SSE) to capture unique viewpoint soft semantic information and shared hard tag semantic information, thereby effectively improving the semantic accuracy and consistency of the generated left and right images. Extensive experimental results demonstrate that DiffSteISR accurately reconstructs natural and precise textures from low-resolution stereo images while maintaining high semantic and texture consistency between the left and right views.
Submitted 14 August, 2024; v1 submitted 14 August, 2024; originally announced August 2024.

arXiv:2408.01057 [pdf, other] (cs.HC) - https://arxiv.org/abs/2408.01057
Supporting Industry Computing Researchers in Assessing, Articulating, and Addressing the Potential Negative Societal Impact of Their Work
Authors: Wesley Hanwen Deng, Solon Barocas, Jennifer Wortman Vaughan
Abstract: Recent years have witnessed increasing calls for computing researchers to grapple with the societal impacts of their work. Tools such as impact assessments have gained prominence as a method to uncover potential impacts, and a number of publication venues now encourage authors to include an impact statement in their submissions. Despite this push, little is known about the way researchers assess, articulate, and address the potential negative societal impact of their work, especially in industry settings, where research outcomes are often quickly integrated into products. In addition, while there are nascent efforts to support researchers in this task, there remains a dearth of empirically informed tools and processes. Through interviews with 25 industry computing researchers across different companies and research areas, we first identify four key factors that influence how they grapple with (or choose not to grapple with) the societal impact of their research. To develop an effective impact assessment template tailored to industry computing researchers' needs, we conduct an iterative co-design process with these 25 industry researchers and an additional 16 researchers and practitioners with prior experience and expertise in reviewing and developing impact assessments or broad responsible computing practices.
Through the co-design process, we develop 10 design considerations to facilitate the effective design, development, and adaptation of an impact assessment template for use in industry research settings and beyond, as well as our own "Societal Impact Assessment" template with concrete scaffolds. We explore the effectiveness of this template through a user study with 15 industry research interns, revealing both its strengths and limitations. Finally, we discuss the implications for future researchers and organizations seeking to foster more responsible research practices.
Submitted 12 August, 2024; v1 submitted 2 August, 2024; originally announced August 2024.

arXiv:2407.19434 [pdf, other] (cs.CV) - https://arxiv.org/abs/2407.19434
FINER++: Building a Family of Variable-periodic Functions for Activating Implicit Neural Representation
Authors: Hao Zhu, Zhen Liu, Qi Zhang, Jingde Fu, Weibing Deng, Zhan Ma, Yanwen Guo, Xun Cao
Abstract: Implicit Neural Representation (INR), which utilizes a neural network to map coordinate inputs to corresponding attributes, is causing a revolution in the field of signal processing. However, current INR techniques suffer from the "frequency"-specified spectral bias and capacity-convergence gap, resulting in imperfect performance when representing complex signals with multiple "frequencies".
We have identified that both of these characteristics can be handled by increasing the utilization of the definition domain in current activation functions, for which we propose the FINER++ framework, extending existing periodic/non-periodic activation functions to variable-periodic ones. By initializing the bias of the neural network with different ranges, sub-functions with various frequencies in the variable-periodic function are selected for activation. Consequently, the supported frequency set can be flexibly tuned, leading to improved performance in signal representation. We demonstrate the generalization and capabilities of FINER++ with different activation function backbones (Sine, Gauss. and Wavelet) and various tasks (2D image fitting, 3D signed distance field representation, 5D neural radiance fields optimization and streamable INR transmission), and we show that it improves existing INRs. Project page: https://liuzhen0212.github.io/finerpp/
Submitted 28 July, 2024; originally announced July 2024.
Comments: Extension of the previous CVPR paper "FINER: Flexible spectral-bias tuning in implicit neural representation by variable-periodic activation functions". arXiv admin note: substantial text overlap with arXiv:2312.02434
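
As a concrete picture of a variable-periodic activation, the FINER line of work uses a sine whose local frequency grows with the magnitude of the pre-activation, for example sin((|z| + 1) * z) with z = Wx + b, so that biases initialized over a wider range select sub-functions of different frequencies. The layer below is a sketch of that idea, not the authors' released implementation; the bias range k is an assumed hyperparameter.

```python
import torch
import torch.nn as nn

class FinerLayer(nn.Module):
    """Sketch of a variable-periodic sine layer: y = sin((|z| + 1) * z), z = Wx + b."""

    def __init__(self, in_dim: int, out_dim: int, k: float = 10.0):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim)
        # A wider bias initialization selects sub-functions with different frequencies.
        nn.init.uniform_(self.linear.bias, -k, k)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        z = self.linear(x)
        return torch.sin((z.abs() + 1.0) * z)
```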

arXiv:2407.13161 [pdf, other] (physics.soc-ph, cs.CY, physics.ed-ph) - https://arxiv.org/abs/2407.13161
How to quantify an examination? Evidence from physics examinations via complex networks
Authors: Min Xia, Zhu Su, Weibing Deng, Xiumei Feng, Benwei Zhang
Abstract: Given the untapped potential for continuous improvement of examinations, quantitative investigations of examinations could guide efforts to considerably improve learning efficiency and evaluation, and thus greatly help both learners and educators. However, there is a general lack of quantitative methods for investigating examinations. To address this gap, we propose a new metric via complex networks: the knowledge point network (KPN) of an examination is constructed by representing the knowledge points (concepts, laws, etc.) as nodes and adding links when these points appear in the same question. The topological quantities of KPNs, such as degree, centrality, and community structure, can then be employed to systematically explore the structural properties and evolution of examinations. In this work, 35 physics examinations from the NCEE, spanning 2006 to 2020, were investigated as evidence. We found that the constructed KPNs are scale-free networks that show strong assortativity and small-world effects in most cases. The communities within the KPNs are obvious, and the key nodes are mainly related to mechanics and electromagnetism. Different question types are related to specific knowledge points, leading to noticeable structural variations in KPNs. Moreover, changes in the KPN topology between examinations administered in different years may offer insights guiding college entrance examination reforms. Based on topological quantities such as the average degree, network density, average clustering coefficient, and network transitivity, the index Fd is proposed to evaluate examination difficulty. All of the above results show that our approach can comprehensively quantify the knowledge structures and examination characteristics. These networks may elucidate comprehensive examination knowledge graphs for educators and guide improvements in teaching.
Submitted 18 July, 2024; originally announced July 2024.
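
The KPN construction itself is easy to reproduce in outline: knowledge points become nodes and every pair of points that co-occurs in a question is linked. The sketch below uses networkx on a made-up toy examination and computes a few of the topological quantities mentioned above; it is illustrative only.

```python
import itertools
import networkx as nx

# Toy examination: each question is tagged with the knowledge points it covers.
questions = [
    {"Newton's laws", "friction"},
    {"friction", "energy conservation"},
    {"circuits", "Ohm's law"},
    {"Ohm's law", "electromagnetic induction", "circuits"},
]

G = nx.Graph()
for points in questions:
    G.add_nodes_from(points)
    # Link every pair of knowledge points that appears in the same question.
    G.add_edges_from(itertools.combinations(points, 2))

print("average degree:", sum(d for _, d in G.degree()) / G.number_of_nodes())
print("density:", nx.density(G))
print("average clustering:", nx.average_clustering(G))
print("transitivity:", nx.transitivity(G))
```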
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.13161v1-abstract-full').style.display = 'none'; document.getElementById('2407.13161v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.12580">arXiv:2407.12580</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.12580">pdf</a>, <a href="https://arxiv.org/format/2407.12580">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> E5-V: Universal Embeddings with Multimodal Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+T">Ting Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+M">Minghui Song</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Haizhen Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weiwei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+F">Feng Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+D">Deqing Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhuang%2C+F">Fuzhen Zhuang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.12580v1-abstract-short" style="display: inline;"> Multimodal large language models (MLLMs) have shown promising advancements in general visual and language understanding. However, the representation of multimodal information using MLLMs remains largely unexplored. In this work, we introduce a new framework, E5-V, designed to adapt MLLMs for achieving universal multimodal embeddings. Our findings highlight the significant potential of MLLMs in rep&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.12580v1-abstract-full').style.display = 'inline'; document.getElementById('2407.12580v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.12580v1-abstract-full" style="display: none;"> Multimodal large language models (MLLMs) have shown promising advancements in general visual and language understanding. However, the representation of multimodal information using MLLMs remains largely unexplored. In this work, we introduce a new framework, E5-V, designed to adapt MLLMs for achieving universal multimodal embeddings. Our findings highlight the significant potential of MLLMs in representing multimodal inputs compared to previous approaches. 
By leveraging MLLMs with prompts, E5-V effectively bridges the modality gap between different types of inputs, demonstrating strong performance in multimodal embeddings even without fine-tuning. We propose a single modality training approach for E5-V, where the model is trained exclusively on text pairs. This method demonstrates significant improvements over traditional multimodal training on image-text pairs, while reducing training costs by approximately 95%. Additionally, this approach eliminates the need for costly multimodal training data collection. Extensive experiments across four types of tasks demonstrate the effectiveness of E5-V. As a universal multimodal model, E5-V not only achieves but often surpasses state-of-the-art performance in each task, despite being trained on a single modality. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.12580v1-abstract-full').style.display = 'none'; document.getElementById('2407.12580v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Code and models are available at https://github.com/kongds/E5-V</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.07780">arXiv:2407.07780</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.07780">pdf</a>, <a href="https://arxiv.org/format/2407.07780">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Cross Domain Object Detection via Multi-Granularity Confidence Alignment based Mean Teacher </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jiangming Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+L">Li Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wanxia Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhen Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yu Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+Y">Yingmei Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yongxiang Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.07780v1-abstract-short" style="display: inline;"> Cross domain object detection learns an object detector for an unlabeled target domain by transferring knowledge from an annotated source domain. Promising results have been achieved via Mean Teacher, however, pseudo labeling which is the bottleneck of mutual learning remains to be further explored. 
In this study, we find that confidence misalignment of the predictions, including category-level ov&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.07780v1-abstract-full').style.display = 'inline'; document.getElementById('2407.07780v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.07780v1-abstract-full" style="display: none;"> Cross domain object detection learns an object detector for an unlabeled target domain by transferring knowledge from an annotated source domain. Promising results have been achieved via Mean Teacher, however, pseudo labeling which is the bottleneck of mutual learning remains to be further explored. In this study, we find that confidence misalignment of the predictions, including category-level overconfidence, instance-level task confidence inconsistency, and image-level confidence misfocusing, leads to the injection of noisy pseudo labels into the training process and brings suboptimal performance on the target domain. To tackle this issue, we present a novel general framework termed Multi-Granularity Confidence Alignment Mean Teacher (MGCAMT) for cross domain object detection, which alleviates confidence misalignment across category-, instance-, and image-levels simultaneously to obtain high quality pseudo supervision for better teacher-student learning. Specifically, to align confidence with accuracy at category level, we propose Classification Confidence Alignment (CCA) to model category uncertainty based on Evidential Deep Learning (EDL) and filter out the category incorrect labels via an uncertainty-aware selection strategy. Furthermore, to mitigate the instance-level misalignment between classification and localization, we design Task Confidence Alignment (TCA) to enhance the interaction between the two task branches and allow each classification feature to adaptively locate the optimal feature for the regression. Finally, we develop imagery Focusing Confidence Alignment (FCA) adopting another way of pseudo label learning, i.e., we use the original outputs from the Mean Teacher network for supervised learning without label assignment to concentrate on holistic information in the target image. These three procedures benefit from each other from a cooperative learning perspective. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.07780v1-abstract-full').style.display = 'none'; document.getElementById('2407.07780v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024.
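<p>As a rough illustration of the uncertainty-aware selection idea behind CCA, the sketch below derives a Dirichlet-based uncertainty score from toy detector logits in the Evidential Deep Learning style and keeps only low-uncertainty pseudo labels. The shapes, the ReLU evidence function, and the 0.5 threshold are assumptions made for illustration, not the paper's settings or implementation.</p>
<pre><code class="language-python">
# Hedged sketch of uncertainty-aware pseudo-label filtering in the EDL style
# (toy data; not the authors' CCA implementation).
import numpy as np

def edl_uncertainty(logits):
    """Dirichlet uncertainty u = K / S, where S is the total evidence plus K."""
    evidence = np.maximum(logits, 0.0)           # non-negative evidence, e.g. via ReLU
    alpha = evidence + 1.0                       # Dirichlet concentration parameters
    strength = alpha.sum(axis=-1, keepdims=True)
    u = logits.shape[-1] / strength              # per-box uncertainty in (0, 1]
    probs = alpha / strength                     # expected class probabilities
    return u.squeeze(-1), probs

rng = np.random.default_rng(0)
teacher_logits = rng.normal(size=(5, 8))         # 5 candidate boxes, 8 classes (toy)
uncertainty, probs = edl_uncertainty(teacher_logits)

threshold = 0.5                                  # hypothetical selection threshold
keep = np.less(uncertainty, threshold)           # uncertainty-aware selection
pseudo_labels = probs[keep].argmax(axis=-1)
print("kept boxes:", np.flatnonzero(keep), "labels:", pseudo_labels)
</code></pre>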
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.06935">arXiv:2407.06935</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.06935">pdf</a>, <a href="https://arxiv.org/format/2407.06935">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation">stat.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Bayesian Federated Learning with Hamiltonian Monte Carlo: Algorithm and Theory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liang%2C+J">Jiajun Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qian Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Q">Qifan Song</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+G">Guang Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.06935v1-abstract-short" style="display: inline;"> This work introduces a novel and efficient Bayesian federated learning algorithm, namely, the Federated Averaging stochastic Hamiltonian Monte Carlo (FA-HMC), for parameter estimation and uncertainty quantification. We establish rigorous convergence guarantees of FA-HMC on non-iid distributed data sets, under the strong convexity and Hessian smoothness assumptions. Our analysis investigates the ef&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.06935v1-abstract-full').style.display = 'inline'; document.getElementById('2407.06935v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.06935v1-abstract-full" style="display: none;"> This work introduces a novel and efficient Bayesian federated learning algorithm, namely, the Federated Averaging stochastic Hamiltonian Monte Carlo (FA-HMC), for parameter estimation and uncertainty quantification. We establish rigorous convergence guarantees of FA-HMC on non-iid distributed data sets, under the strong convexity and Hessian smoothness assumptions. Our analysis investigates the effects of parameter space dimension, noise on gradients and momentum, and the frequency of communication (between the central node and local nodes) on the convergence and communication costs of FA-HMC. Beyond that, we establish the tightness of our analysis by showing that the convergence rate cannot be improved even for continuous FA-HMC process. Moreover, extensive empirical studies demonstrate that FA-HMC outperforms the existing Federated Averaging-Langevin Monte Carlo (FA-LD) algorithm. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.06935v1-abstract-full').style.display = 'none'; document.getElementById('2407.06935v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
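<p>For readers unfamiliar with the setup, the toy sketch below combines the two ingredients named in the abstract: each client runs a few Hamiltonian Monte Carlo leapfrog steps on its own local objective, and the central node averages the client parameters each communication round. The quadratic client objectives, step size, and round count are hypothetical, and the Metropolis correction is omitted; the paper's algorithm and assumptions are more involved.</p>
<pre><code class="language-python">
# Schematic toy sketch (not the authors' implementation) of federated averaging
# combined with local HMC leapfrog updates.
import numpy as np

rng = np.random.default_rng(1)
client_means = [np.array([1.0, -2.0]), np.array([3.0, 0.5])]   # toy non-iid clients

def local_grad(theta, mean):
    return theta - mean            # gradient of 0.5 * ||theta - mean||^2

def leapfrog(theta, mean, step=0.1, n_steps=10):
    # Standard leapfrog integrator; accept/reject step omitted for brevity.
    momentum = rng.normal(size=theta.shape)
    momentum -= 0.5 * step * local_grad(theta, mean)
    for _ in range(n_steps - 1):
        theta = theta + step * momentum
        momentum -= step * local_grad(theta, mean)
    theta = theta + step * momentum
    momentum -= 0.5 * step * local_grad(theta, mean)
    return theta

theta_global = np.zeros(2)
for communication_round in range(50):
    local_thetas = [leapfrog(theta_global.copy(), m) for m in client_means]
    theta_global = np.mean(local_thetas, axis=0)    # federated averaging step

print("global sample after 50 rounds:", theta_global)
</code></pre>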
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.03598">arXiv:2407.03598</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.03598">pdf</a>, <a href="https://arxiv.org/format/2407.03598">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ASteISR: Adapting Single Image Super-resolution Pre-trained Model for Efficient Stereo Image Super-resolution </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yuanbo Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+Y">Yuyang Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xinlin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+Q">Qinquan Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Tong%2C+T">Tong Tong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.03598v1-abstract-short" style="display: inline;"> Despite advances in the paradigm of pre-training then fine-tuning in low-level vision tasks, significant challenges persist, particularly the memory usage and training time associated with the increased size of pre-trained models. Another concern often encountered is the unsatisfying results yielded when directly applying pre-trained single-image models to the multi-image domain. In this paper, we propos&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03598v1-abstract-full').style.display = 'inline'; document.getElementById('2407.03598v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.03598v1-abstract-full" style="display: none;"> Despite advances in the paradigm of pre-training then fine-tuning in low-level vision tasks, significant challenges persist, particularly the memory usage and training time associated with the increased size of pre-trained models. Another concern often encountered is the unsatisfying results yielded when directly applying pre-trained single-image models to the multi-image domain. In this paper, we propose an efficient method for transferring a pre-trained single-image super-resolution (SISR) transformer network to the domain of stereo image super-resolution (SteISR) through a parameter-efficient fine-tuning (PEFT) method. Specifically, we introduce the concept of stereo adapters and spatial adapters which are incorporated into the pre-trained SISR transformer network. Subsequently, the pre-trained SISR model is frozen, enabling us to fine-tune the adapters using stereo datasets alone. By adopting this training method, we enhance the ability of the SISR model to accurately infer stereo images, improving performance by 0.79 dB on the Flickr1024 dataset. This method allows us to train only 4.8% of the original model parameters, achieving state-of-the-art performance on four commonly used SteISR benchmarks. Compared to the more complicated full fine-tuning approach, our method reduces training time and memory consumption by 57% and 15%, respectively.
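<p>The training recipe, freezing the pre-trained network and updating only small inserted adapters, can be summarised in a few lines of PyTorch. The backbone and Adapter modules below are tiny placeholders rather than the SISR transformer used in the paper; only the pattern (frozen backbone, trainable adapters, optimizer over adapter parameters) is the point.</p>
<pre><code class="language-python">
# Minimal PyTorch-style sketch of adapter-based parameter-efficient fine-tuning
# (placeholder modules, not the actual SISR/SteISR network).
import torch
import torch.nn as nn

class Adapter(nn.Module):
    def __init__(self, dim, bottleneck=8):
        super().__init__()
        self.down = nn.Linear(dim, bottleneck)
        self.up = nn.Linear(bottleneck, dim)

    def forward(self, x):
        return x + self.up(torch.relu(self.down(x)))   # residual adapter

backbone = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))  # stands in for the pre-trained model
adapter = Adapter(64)

for p in backbone.parameters():
    p.requires_grad = False                             # freeze the pre-trained weights

optimizer = torch.optim.Adam(adapter.parameters(), lr=1e-4)  # fine-tune adapters only

x = torch.randn(4, 64)                                  # stand-in for stereo features
target = torch.randn(4, 64)
loss = nn.functional.l1_loss(adapter(backbone(x)), target)
loss.backward()
optimizer.step()

trainable = sum(p.numel() for p in adapter.parameters())
total = trainable + sum(p.numel() for p in backbone.parameters())
print(f"trainable fraction: {trainable / total:.2%}")
</code></pre>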
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03598v1-abstract-full').style.display = 'none'; document.getElementById('2407.03598v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.02886">arXiv:2407.02886</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.02886">pdf</a>, <a href="https://arxiv.org/format/2407.02886">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> A Wolf in Sheep&#39;s Clothing: Practical Black-box Adversarial Attacks for Evading Learning-based Windows Malware Detection in the Wild </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ling%2C+X">Xiang Ling</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zhiyu Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+B">Bin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J">Jingzheng Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+S">Shouling Ji</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+T">Tianyue Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Yanjun Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.02886v1-abstract-short" style="display: inline;"> Given the remarkable achievements of existing learning-based malware detection in both academia and industry, this paper presents MalGuise, a practical black-box adversarial attack framework that evaluates the security risks of existing learning-based Windows malware detection systems under the black-box setting. MalGuise first employs a novel semantics-preserving transformation of call-based redi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02886v1-abstract-full').style.display = 'inline'; document.getElementById('2407.02886v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.02886v1-abstract-full" style="display: none;"> Given the remarkable achievements of existing learning-based malware detection in both academia and industry, this paper presents MalGuise, a practical black-box adversarial attack framework that evaluates the security risks of existing learning-based Windows malware detection systems under the black-box setting. MalGuise first employs a novel semantics-preserving transformation of call-based redividing to concurrently manipulate both nodes and edges of malware&#39;s control-flow graph, making it less noticeable. By employing a Monte-Carlo-tree-search-based optimization, MalGuise then searches for an optimized sequence of call-based redividing transformations to apply to the input Windows malware for evasions. 
Finally, it reconstructs the adversarial malware file based on the optimized transformation sequence while adhering to Windows executable format constraints, thereby maintaining the same semantics as the original. MalGuise is systematically evaluated against three state-of-the-art learning-based Windows malware detection systems under the black-box setting. Evaluation results demonstrate that MalGuise achieves a remarkably high attack success rate, mostly exceeding 95%, with over 91% of the generated adversarial malware files maintaining the same semantics. Furthermore, MalGuise achieves up to a 74.97% attack success rate against five anti-virus products, highlighting potential tangible security concerns to real-world users. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02886v1-abstract-full').style.display = 'none'; document.getElementById('2407.02886v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper has been accepted by 33rd USENIX Security Symposium 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.18957">arXiv:2406.18957</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.18957">pdf</a>, <a href="https://arxiv.org/format/2406.18957">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> </div> </div> <p class="title is-5 mathjax"> A Treatment of EIP-1559: Enhancing Transaction Fee Mechanism through Nth-Price Auction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+K">Kun Li</a>, <a href="/search/cs?searchtype=author&amp;query=Qi%2C+G">Guangpeng Qi</a>, <a href="/search/cs?searchtype=author&amp;query=Shang%2C+G">Guangyong Shang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wanli Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+M">Minghui Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+X">Xiuzhen Cheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.18957v1-abstract-short" style="display: inline;"> With the widespread adoption of blockchain technology, the transaction fee mechanism (TFM) in blockchain systems has become a prominent research topic. An ideal TFM should satisfy user incentive compatibility (UIC), miner incentive compatibility (MIC), and miner-user side contract proofness ($c$-SCP). 
However, state-of-the-art works either fail to meet these three properties simultaneously or only&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.18957v1-abstract-full').style.display = 'inline'; document.getElementById('2406.18957v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.18957v1-abstract-full" style="display: none;"> With the widespread adoption of blockchain technology, the transaction fee mechanism (TFM) in blockchain systems has become a prominent research topic. An ideal TFM should satisfy user incentive compatibility (UIC), miner incentive compatibility (MIC), and miner-user side contract proofness ($c$-SCP). However, state-of-the-art works either fail to meet these three properties simultaneously or only satisfy them under certain conditions. In this paper, we propose a burning $N$-price auction TFM named BNP. This mechanism divides the transaction fee into a base fee, which is burned, and a priority fee, which is allocated to miners. Theoretical proofs and experimental analyses demonstrate that, even under conditions of significant transaction congestion, this mechanism satisfies UIC, MIC, and $c$-SCP simultaneously. Furthermore, the BNP mechanism is not constrained by the type of blockchain consensus, making it widely applicable. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.18957v1-abstract-full').style.display = 'none'; document.getElementById('2406.18957v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
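<p>To make the fee split concrete, the sketch below clears one toy block with an N-price rule: the top-N bids are included, every included transaction pays the N-th highest bid, the base-fee share is burned, and the remainder goes to the miner as the priority fee. The specific clearing and splitting rules here are simplifying assumptions for illustration, not the exact BNP specification from the paper.</p>
<pre><code class="language-python">
# Illustrative sketch of a burning N-price auction in the spirit of BNP
# (simplified clearing rule; hypothetical bids).
def clear_block(bids, capacity, base_fee):
    """Return (included bids, burned amount, miner revenue) for one toy block."""
    ranked = sorted(bids, reverse=True)
    included = ranked[:capacity]
    clearing_price = included[-1]                        # the N-th highest bid
    burned = base_fee * len(included)                    # the base-fee share is destroyed
    miner_revenue = len(included) * max(clearing_price - base_fee, 0.0)  # priority fees
    return included, burned, miner_revenue

bids = [5.0, 9.0, 3.5, 7.2, 4.1, 8.3]   # hypothetical per-transaction fee bids
included, burned, revenue = clear_block(bids, capacity=4, base_fee=2.0)
print("included bids:", included, "burned:", burned, "to miner:", revenue)
</code></pre>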
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.11147">arXiv:2406.11147</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.11147">pdf</a>, <a href="https://arxiv.org/format/2406.11147">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Vul-RAG: Enhancing LLM-based Vulnerability Detection via Knowledge-level RAG </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Du%2C+X">Xueying Du</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+G">Geng Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+K">Kaixin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+J">Jiayi Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wentai Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+M">Mingwei Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+B">Bihuan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+X">Xin Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+T">Tao Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Lou%2C+Y">Yiling Lou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.11147v2-abstract-short" style="display: inline;"> Vulnerability detection is essential for software quality assurance. In recent years, deep learning models (especially large language models) have shown promise in vulnerability detection. In this work, we propose a novel LLM-based vulnerability detection technique Vul-RAG, which leverages a knowledge-level retrieval-augmented generation (RAG) framework to detect vulnerability for the given code in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.11147v2-abstract-full').style.display = 'inline'; document.getElementById('2406.11147v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.11147v2-abstract-full" style="display: none;"> Vulnerability detection is essential for software quality assurance. In recent years, deep learning models (especially large language models) have shown promise in vulnerability detection. In this work, we propose a novel LLM-based vulnerability detection technique Vul-RAG, which leverages a knowledge-level retrieval-augmented generation (RAG) framework to detect vulnerability for the given code in three phases. First, Vul-RAG constructs a vulnerability knowledge base by extracting multi-dimension knowledge via LLMs from existing CVE instances; second, for a given code snippet, Vul-RAG retrieves the relevant vulnerability knowledge from the constructed knowledge base based on functional semantics; third, Vul-RAG leverages LLMs to check the vulnerability of the given code snippet by reasoning about the presence of vulnerability causes and fixing solutions of the retrieved vulnerability knowledge.
Our evaluation of Vul-RAG on our constructed benchmark PairVul shows that Vul-RAG substantially outperforms all baselines by 12.96%/110% relative improvement in accuracy/pairwise-accuracy. In addition, our user study shows that the vulnerability knowledge generated by Vul-RAG can serve as high-quality explanations which can improve the manual detection accuracy from 0.60 to 0.77. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.11147v2-abstract-full').style.display = 'none'; document.getElementById('2406.11147v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.09908">arXiv:2406.09908</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.09908">pdf</a>, <a href="https://arxiv.org/format/2406.09908">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> What Does Softmax Probability Tell Us about Classifiers Ranking Across Diverse Test Conditions? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tu%2C+W">Weijie Tu</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weijian Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+L">Liang Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Gedeon%2C+T">Tom Gedeon</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.09908v1-abstract-short" style="display: inline;"> This work aims to develop a measure that can accurately rank the performance of various classifiers when they are tested on unlabeled data from out-of-distribution (OOD) distributions. We commence by demonstrating that conventional uncertainty metrics, notably the maximum Softmax prediction probability, possess inherent utility in forecasting model generalization across certain OOD contexts. Build&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.09908v1-abstract-full').style.display = 'inline'; document.getElementById('2406.09908v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.09908v1-abstract-full" style="display: none;"> This work aims to develop a measure that can accurately rank the performance of various classifiers when they are tested on unlabeled data from out-of-distribution (OOD) distributions. We commence by demonstrating that conventional uncertainty metrics, notably the maximum Softmax prediction probability, possess inherent utility in forecasting model generalization across certain OOD contexts. Building on this insight, we introduce a new measure called Softmax Correlation (SoftmaxCorr).
It calculates the cosine similarity between a class-class correlation matrix, constructed from Softmax output vectors across an unlabeled test dataset, and a predefined reference matrix that embodies ideal class correlations. A high resemblance of predictions to the reference matrix signals that the model delivers confident and uniform predictions across all categories, reflecting minimal uncertainty and confusion. Through rigorous evaluation across a suite of datasets, including ImageNet, CIFAR-10, and WILDS, we affirm the predictive validity of SoftmaxCorr in accurately forecasting model performance within both in-distribution (ID) and OOD settings. Furthermore, we discuss the limitations of our proposed measure and suggest avenues for future research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.09908v1-abstract-full').style.display = 'none'; document.getElementById('2406.09908v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">TMLR 2024 (https://openreview.net/forum?id=vtiDUgGjyx)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.08772">arXiv:2406.08772</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.08772">pdf</a>, <a href="https://arxiv.org/format/2406.08772">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> MMFakeBench: A Mixed-Source Multimodal Misinformation Detection Benchmark for LVLMs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xuannan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zekun Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+P">Peipei Li</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+S">Shuhan Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Cui%2C+X">Xing Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+L">Linzhi Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Huaibo Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weihong Deng</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+Z">Zhaofeng He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.08772v2-abstract-short" style="display: inline;"> Current multimodal misinformation detection (MMD) methods often assume a single source and type of forgery for each sample, which is insufficient for real-world scenarios where multiple forgery sources coexist. The lack of a benchmark for mixed-source misinformation has hindered progress in this field. 
To address this, we introduce MMFakeBench, the first comprehensive benchmark for mixed-source MM&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08772v2-abstract-full').style.display = 'inline'; document.getElementById('2406.08772v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.08772v2-abstract-full" style="display: none;"> Current multimodal misinformation detection (MMD) methods often assume a single source and type of forgery for each sample, which is insufficient for real-world scenarios where multiple forgery sources coexist. The lack of a benchmark for mixed-source misinformation has hindered progress in this field. To address this, we introduce MMFakeBench, the first comprehensive benchmark for mixed-source MMD. MMFakeBench includes 3 critical sources: textual veracity distortion, visual veracity distortion, and cross-modal consistency distortion, along with 12 sub-categories of misinformation forgery types. We further conduct an extensive evaluation of 6 prevalent detection methods and 15 large vision-language models (LVLMs) on MMFakeBench under a zero-shot setting. The results indicate that current methods struggle under this challenging and realistic mixed-source MMD setting. Additionally, we propose an innovative unified framework, which integrates rationales, actions, and tool-use capabilities of LVLM agents, significantly enhancing accuracy and generalization. We believe this study will catalyze future research into more realistic mixed-source multimodal misinformation and provide a fair evaluation of misinformation detection methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08772v2-abstract-full').style.display = 'none'; document.getElementById('2406.08772v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project page: https://liuxuannan.github.io/MMFakeBench.github.io/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.18979">arXiv:2405.18979</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.18979">pdf</a>, <a href="https://arxiv.org/format/2405.18979">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> MANO: Exploiting Matrix Norm for Unsupervised Accuracy Estimation Under Distribution Shifts </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xie%2C+R">Renchunzi Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Odonnat%2C+A">Ambroise Odonnat</a>, <a href="/search/cs?searchtype=author&amp;query=Feofanov%2C+V">Vasilii Feofanov</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weijian Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jianfeng Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=An%2C+B">Bo An</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.18979v3-abstract-short" style="display: inline;"> Leveraging the models&#39; outputs, specifically the logits, is a common approach to estimating the test accuracy of a pre-trained neural network on out-of-distribution (OOD) samples without requiring access to the corresponding ground truth labels. Despite their ease of implementation and computational efficiency, current logit-based methods are vulnerable to overconfidence issues, leading to predict&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.18979v3-abstract-full').style.display = 'inline'; document.getElementById('2405.18979v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.18979v3-abstract-full" style="display: none;"> Leveraging the models&#39; outputs, specifically the logits, is a common approach to estimating the test accuracy of a pre-trained neural network on out-of-distribution (OOD) samples without requiring access to the corresponding ground truth labels. Despite their ease of implementation and computational efficiency, current logit-based methods are vulnerable to overconfidence issues, leading to prediction bias, especially under the natural shift. In this work, we first study the relationship between logits and generalization performance from the view of low-density separation assumption. Our findings motivate our proposed method MaNo which (1) applies a data-dependent normalization on the logits to reduce prediction bias, and (2) takes the $L_p$ norm of the matrix of normalized logits as the estimation score. Our theoretical analysis highlights the connection between the provided score and the model&#39;s uncertainty. 
We conduct an extensive empirical study on common unsupervised accuracy estimation benchmarks and demonstrate that MaNo achieves state-of-the-art performance across various architectures in the presence of synthetic, natural, or subpopulation shifts. The code is available at https://github.com/Renchunzi-Xie/MaNo. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.18979v3-abstract-full').style.display = 'none'; document.getElementById('2405.18979v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The first three authors contributed equally</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.14280">arXiv:2405.14280</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.14280">pdf</a>, <a href="https://arxiv.org/format/2405.14280">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> ASI++: Towards Distributionally Balanced End-to-End Generative Retrieval </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yuxuan Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+T">Tianchi Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+M">Minghui Song</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Haizhen Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weiwei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+F">Feng Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qi Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.14280v1-abstract-short" style="display: inline;"> Generative retrieval, a promising new paradigm in information retrieval, employs a seq2seq model to encode document features into parameters and decode relevant document identifiers (IDs) based on search queries. Existing generative retrieval solutions typically rely on a preprocessing stage to pre-define document IDs, which can suffer from a semantic gap between these IDs and the retrieval task.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.14280v1-abstract-full').style.display = 'inline'; document.getElementById('2405.14280v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.14280v1-abstract-full" style="display: none;"> Generative retrieval, a promising new paradigm in information retrieval, employs a seq2seq model to encode document features into parameters and decode relevant document identifiers (IDs) based on search queries.
Existing generative retrieval solutions typically rely on a preprocessing stage to pre-define document IDs, which can suffer from a semantic gap between these IDs and the retrieval task. However, end-to-end training for both ID assignments and retrieval tasks is challenging due to the long-tailed distribution characteristics of real-world data, resulting in inefficient and unbalanced ID space utilization. To address these issues, we propose ASI++, a novel fully end-to-end generative retrieval method that aims to simultaneously learn balanced ID assignments and improve retrieval performance. ASI++ builds on the fully end-to-end training framework of vanilla ASI and introduces several key innovations. First, a distributionally balanced criterion addresses the imbalance in ID assignments, promoting more efficient utilization of the ID space. Next, a representation bottleneck criterion enhances dense representations to alleviate bottlenecks in learning ID assignments. Finally, an information consistency criterion integrates these processes into a joint optimization framework grounded in information theory. We further explore various module structures for learning ID assignments, including neural quantization, differentiable product quantization, and residual quantization. Extensive experiments on both public and industrial datasets demonstrate the effectiveness of ASI++ in improving retrieval performance and achieving balanced ID assignments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.14280v1-abstract-full').style.display = 'none'; document.getElementById('2405.14280v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
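<p>Of the ID-assignment structures listed above, residual quantization is the easiest to illustrate: each stage picks the nearest codeword and passes the residual on to the next stage, so a document embedding becomes a short sequence of discrete codes. The random codebooks and dimensions below are stand-ins for the codebooks that ASI++ learns end-to-end, not the paper's modules.</p>
<pre><code class="language-python">
# Toy sketch of residual quantization for document IDs (random codebooks,
# illustrative only).
import numpy as np

rng = np.random.default_rng(0)
dim, n_stages, codebook_size = 16, 3, 8
codebooks = rng.normal(size=(n_stages, codebook_size, dim))

def assign_doc_id(embedding):
    residual = embedding.copy()
    codes = []
    for stage in range(n_stages):
        dists = np.linalg.norm(codebooks[stage] - residual, axis=1)
        idx = int(np.argmin(dists))              # nearest codeword at this stage
        codes.append(idx)
        residual = residual - codebooks[stage][idx]
    return tuple(codes)                           # the document's discrete ID

doc_embedding = rng.normal(size=dim)
print("document ID:", assign_doc_id(doc_embedding))
</code></pre>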
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.12130">arXiv:2405.12130</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.12130">pdf</a>, <a href="https://arxiv.org/format/2405.12130">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MoRA: High-Rank Updating for Parameter-Efficient Fine-Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+T">Ting Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+S">Shaohan Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+S">Shengyue Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Zihan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+H">Haizhen Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+F">Furu Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Weiwei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+F">Feng Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Q">Qi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+D">Deqing Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhuang%2C+F">Fuzhen Zhuang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.12130v1-abstract-short" style="display: inline;"> Low-rank adaptation is a popular parameter-efficient fine-tuning method for large language models. In this paper, we analyze the impact of low-rank updating, as implemented in LoRA. Our findings suggest that the low-rank updating mechanism may limit the ability of LLMs to effectively learn and memorize new knowledge. Inspired by this observation, we propose a new method called MoRA, which employs&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.12130v1-abstract-full').style.display = 'inline'; document.getElementById('2405.12130v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.12130v1-abstract-full" style="display: none;"> Low-rank adaptation is a popular parameter-efficient fine-tuning method for large language models. In this paper, we analyze the impact of low-rank updating, as implemented in LoRA. Our findings suggest that the low-rank updating mechanism may limit the ability of LLMs to effectively learn and memorize new knowledge. Inspired by this observation, we propose a new method called MoRA, which employs a square matrix to achieve high-rank updating while maintaining the same number of trainable parameters. To achieve this, we introduce the corresponding non-parameter operators to reduce the input dimension and increase the output dimension for the square matrix. Furthermore, these operators ensure that the weight can be merged back into LLMs, which allows our method to be deployed like LoRA. We perform a comprehensive evaluation of our method across five tasks: instruction tuning, mathematical reasoning, continual pretraining, memory and pretraining.
Our method outperforms LoRA on memory-intensive tasks and achieves comparable performance on other tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.12130v1-abstract-full').style.display = 'none'; document.getElementById('2405.12130v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Work in Progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.07839">arXiv:2405.07839</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.07839">pdf</a>, <a href="https://arxiv.org/format/2405.07839">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Constrained Exploration via Reflected Replica Exchange Stochastic Gradient Langevin Dynamics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zheng%2C+H">Haoyang Zheng</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+H">Hengrong Du</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Q">Qi Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+G">Guang Lin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.07839v2-abstract-short" style="display: inline;"> Replica exchange stochastic gradient Langevin dynamics (reSGLD) is an effective sampler for non-convex learning in large-scale datasets. However, the simulation may encounter stagnation issues when the high-temperature chain delves too deeply into the distribution tails. To tackle this issue, we propose reflected reSGLD (r2SGLD): an algorithm tailored for constrained non-convex exploration by util&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.07839v2-abstract-full').style.display = 'inline'; document.getElementById('2405.07839v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.07839v2-abstract-full" style="display: none;"> Replica exchange stochastic gradient Langevin dynamics (reSGLD) is an effective sampler for non-convex learning in large-scale datasets. However, the simulation may encounter stagnation issues when the high-temperature chain delves too deeply into the distribution tails. To tackle this issue, we propose reflected reSGLD (r2SGLD): an algorithm tailored for constrained non-convex exploration by utilizing reflection steps within a bounded domain. Theoretically, we observe that reducing the diameter of the domain enhances mixing rates, exhibiting a $\textit{quadratic}$ behavior. 
Empirically, we test its performance through extensive experiments, including identifying dynamical systems with physical constraints, simulations of constrained multi-modal distributions, and image classification tasks. The theoretical and empirical findings highlight the crucial role of constrained exploration in improving the simulation efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.07839v2-abstract-full').style.display = 'none'; document.getElementById('2405.07839v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">28 pages, 13 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.04795">arXiv:2405.04795</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.04795">pdf</a>, <a href="https://arxiv.org/format/2405.04795">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Variational Schrödinger Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wei Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Luo%2C+W">Weijian Luo</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+Y">Yixin Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Bilo%C5%A1%2C+M">Marin Biloš</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Nevmyvaka%2C+Y">Yuriy Nevmyvaka</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+R+T+Q">Ricky T. Q. Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.04795v4-abstract-short" style="display: inline;"> Schrödinger bridge (SB) has emerged as the go-to method for optimizing transportation plans in diffusion models. However, SB requires estimating the intractable forward score functions, inevitably resulting in the costly implicit training loss based on simulated trajectories. To improve the scalability while preserving efficient transportation plans, we leverage variational inference to linearize&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.04795v4-abstract-full').style.display = 'inline'; document.getElementById('2405.04795v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.04795v4-abstract-full" style="display: none;"> Schrödinger bridge (SB) has emerged as the go-to method for optimizing transportation plans in diffusion models. However, SB requires estimating the intractable forward score functions, inevitably resulting in the costly implicit training loss based on simulated trajectories.
To improve the scalability while preserving efficient transportation plans, we leverage variational inference to linearize the forward score functions (variational scores) of SB and restore simulation-free properties in training backward scores. We propose the variational Schrödinger diffusion model (VSDM), where the forward process is a multivariate diffusion and the variational scores are adaptively optimized for efficient transport. Theoretically, we use stochastic approximation to prove the convergence of the variational scores and show the convergence of the adaptively generated samples based on the optimal variational scores. Empirically, we test the algorithm in simulated examples and observe that VSDM is efficient in generations of anisotropic shapes and yields straighter sample trajectories compared to the single-variate diffusion. We also verify the scalability of the algorithm in real-world data and achieve competitive unconditional generation performance in CIFAR10 and conditional generation in time series modeling. Notably, VSDM no longer depends on warm-up initializations and has become tuning-friendly in training large-scale experiments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.04795v4-abstract-full').style.display = 'none'; document.getElementById('2405.04795v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICML 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.02241">arXiv:2405.02241</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.02241">pdf</a>, <a href="https://arxiv.org/format/2405.02241">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> WeightedPose: Generalizable Cross-Pose Estimation via Weighted SVD </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+X">Xuxin Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+H">Heng Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Harry Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+W">Wenxing Deng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.02241v2-abstract-short" style="display: inline;"> We introduce a new approach for robotic manipulation tasks in human settings that necessitates understanding the 3D geometric connections between a pair of objects. Conventional end-to-end training approaches, which convert pixel observations directly into robot actions, often fail to effectively understand complex pose relationships and do not easily adapt to new object configurations.
To overcom&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.02241v2-abstract-full').style.display = 'inline'; document.getElementById('2405.02241v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.02241v2-abstract-full" style="display: none;"> We introduce a new approach for robotic manipulation tasks in human settings that necessitates understanding the 3D geometric connections between a pair of objects. Conventional end-to-end training approaches, which convert pixel observations directly into robot actions, often fail to effectively understand complex pose relationships and do not easily adapt to new object configurations. To overcome these issues, our method focuses on learning the 3D geometric relationships, particularly how critical parts of one object relate to those of another. We employ Weighted SVD in our standalone model to analyze pose relationships both in articulated parts and in free-floating objects. For instance, our model can comprehend the spatial relationship between an oven door and the oven body, as well as between a lasagna plate and the oven. By concentrating on the 3D geometric connections, our strategy empowers robots to carry out intricate manipulation tasks based on object-centric perspectives <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.02241v2-abstract-full').style.display = 'none'; document.getElementById('2405.02241v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
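<p>The weighted SVD step referenced in this abstract is the classical weighted Kabsch problem: given two point clouds with soft per-point weights, recover the rigid transform that aligns them. The sketch below solves it with NumPy on synthetic points and weights; in the actual method the correspondences and weights come from the learned model, so everything here other than the standard closed-form solution is an illustrative assumption.</p>
<pre><code class="language-python">
# Hedged sketch of weighted SVD (weighted Kabsch) alignment on synthetic data.
import numpy as np

def weighted_kabsch(p, q, w):
    """Return rotation R and translation t minimizing sum_i w_i * ||R p_i + t - q_i||^2."""
    w = w / w.sum()
    p_bar = (w[:, None] * p).sum(axis=0)
    q_bar = (w[:, None] * q).sum(axis=0)
    h = (w[:, None] * (p - p_bar)).T @ (q - q_bar)       # weighted cross-covariance
    u, _, vt = np.linalg.svd(h)
    d = np.sign(np.linalg.det(vt.T @ u.T))               # guard against reflections
    r = vt.T @ np.diag([1.0, 1.0, d]) @ u.T
    return r, q_bar - r @ p_bar

rng = np.random.default_rng(0)
p = rng.normal(size=(50, 3))
true_r, _ = np.linalg.qr(rng.normal(size=(3, 3)))        # random orthogonal matrix
true_r *= np.sign(np.linalg.det(true_r))                 # force a proper rotation
q = p @ true_r.T + np.array([0.3, -0.1, 0.5])
r, t = weighted_kabsch(p, q, w=rng.uniform(0.5, 1.0, size=50))
print("rotation recovered:", np.allclose(r, true_r, atol=1e-6))
</code></pre>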
arXiv:2404.17227 [pdf, other] econ.GN, cs.CE, cs.CR, cs.CY, q-fin.RM
Trust Dynamics and Market Behavior in Cryptocurrency: A Comparative Study of Centralized and Decentralized Exchanges
Authors: Xintong Wu, Wanling Deng, Yuotng Quan, Luyao Zhang
Abstract: In the evolving landscape of digital finance, the transition from centralized to decentralized trust mechanisms, primarily driven by blockchain technology, plays a critical role in shaping the cryptocurrency ecosystem. This paradigm shift raises questions about the traditional reliance on centralized trust and introduces a novel, decentralized trust framework built upon distributed networks. Our research delves into the consequences of this shift, particularly focusing on how incidents influence trust within cryptocurrency markets, thereby affecting trade behaviors in centralized (CEXs) and decentralized exchanges (DEXs). We conduct a comprehensive analysis of various events, assessing their effects on market dynamics, including token valuation and trading volumes in both CEXs and DEXs. Our findings highlight the pivotal role of trust in directing user preferences and the fluidity of trust transfer between centralized and decentralized platforms. Despite certain anomalies, the results largely align with our initial hypotheses, revealing the intricate nature of user trust in cryptocurrency markets. This study contributes significantly to interdisciplinary research, bridging distributed systems, behavioral finance, and Decentralized Finance (DeFi). It offers valuable insights for the distributed computing community, particularly in understanding and applying distributed trust mechanisms in digital economies, paving the way for future research that could further explore the socio-economic dimensions and leverage blockchain data in this dynamic domain.
Submitted 26 April, 2024; originally announced April 2024.
arXiv:2404.16484 [pdf, other] cs.CV, eess.IV
Real-Time 4K Super-Resolution of Compressed AVIF Images. AIS 2024 Challenge Survey
Authors: Marcos V. Conde, Zhijun Lei, Wen Li, Cosmin Stejerean, Ioannis Katsavounidis, Radu Timofte, Kihwan Yoon, Ganzorig Gankhuyag, Jiangtao Lv, Long Sun, Jinshan Pan, Jiangxin Dong, Jinhui Tang, Zhiyuan Li, Hao Wei, Chenyang Ge, Dongyang Zhang, Tianle Liu, Huaian Chen, Yi Jin, Menghan Zhou, Yiqiang Yan, Si Gao, Biao Wu, Shaoli Liu, et al. (50 additional authors not shown)
Abstract: This paper introduces a novel benchmark as part of the AIS 2024 Real-Time Image Super-Resolution (RTSR) Challenge, which aims to upscale compressed images from 540p to 4K resolution (4x factor) in real-time on commercial GPUs. For this, we use a diverse test set containing a variety of 4K images ranging from digital art to gaming and photography. The images are compressed using the modern AVIF codec, instead of JPEG. All the proposed methods improve PSNR fidelity over Lanczos interpolation, and process images in under 10 ms. Out of the 160 participants, 25 teams submitted their code and models. The solutions present novel designs tailored for memory efficiency and runtime on edge devices. This survey describes the best solutions for real-time SR of compressed high-resolution images.
Submitted 25 April, 2024; originally announced April 2024.
Comments: CVPR 2024, AI for Streaming (AIS) Workshop
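The fidelity comparison mentioned in the abstract, PSNR against a Lanczos upscale of the compressed input, can be reproduced with standard tools. The snippet below is an illustrative sketch of that evaluation only, not the challenge's official scoring code; the file names are placeholders, and opening AVIF with Pillow may require an extra plugin such as pillow-avif-plugin.

```python
# Illustrative sketch of the PSNR-vs-Lanczos baseline described in the abstract
# (not the official challenge scoring code; file names are placeholders).
import numpy as np
from PIL import Image   # reading .avif may require the pillow-avif-plugin package

def psnr(a, b, peak=255.0):
    mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
    return 10.0 * np.log10(peak ** 2 / mse)

gt = Image.open("ground_truth_4k.png")                  # 4K reference frame
lr = Image.open("compressed_540p.avif")                 # AVIF-compressed 540p input
baseline = lr.resize(gt.size, resample=Image.LANCZOS)   # 4x Lanczos upscale
print("Lanczos PSNR:", psnr(np.asarray(gt), np.asarray(baseline)))
# A submitted super-resolution model would be scored the same way and compared
# against this baseline, subject to the sub-10 ms per-image runtime budget.
```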
arXiv:2404.14248 [pdf, other] cs.CV
NTIRE 2024 Challenge on Low Light Image Enhancement: Methods and Results
Authors: Xiaoning Liu, Zongwei Wu, Ao Li, Florin-Alexandru Vasluianu, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, Zhi Jin, Hongjun Wu, Chenxi Wang, Haitao Ling, Yuanhao Cai, Hao Bian, Yuxin Zheng, Jing Lin, Alan Yuille, Ben Shao, Jin Guo, Tianli Liu, Mohao Wu, Yixu Feng, Shuo Hou, Haotian Lin, et al. (87 additional authors not shown)
Abstract: This paper reviews the NTIRE 2024 low light image enhancement challenge, highlighting the proposed solutions and results. The aim of this challenge is to discover an effective network design or solution capable of generating brighter, clearer, and visually appealing results when dealing with a variety of conditions, including ultra-high resolution (4K and beyond), non-uniform illumination, backlighting, extreme darkness, and night scenes. A notable total of 428 participants registered for the challenge, with 22 teams ultimately making valid submissions. This paper meticulously evaluates the state-of-the-art advancements in enhancing low-light images, reflecting the significant progress and creativity in this field.
Submitted 22 April, 2024; originally announced April 2024.
Comments: NTIRE 2024 Challenge Report
arXiv:2404.00563 [pdf, other] cs.CV
Exploiting Inter-sample and Inter-feature Relations in Dataset Distillation
Authors: Wenxiao Deng, Wenbin Li, Tianyu Ding, Lei Wang, Hongguang Zhang, Kuihua Huang, Jing Huo, Yang Gao
Abstract: Dataset distillation has emerged as a promising approach in deep learning, enabling efficient training with small synthetic datasets derived from larger real ones. In particular, distribution matching-based distillation methods attract attention thanks to their effectiveness and low computational cost. However, these methods face two primary limitations: the dispersed feature distribution within the same class in synthetic datasets, which reduces class discrimination, and an exclusive focus on mean feature consistency, which lacks precision and comprehensiveness. To address these challenges, we introduce two novel constraints: a class centralization constraint and a covariance matching constraint. The class centralization constraint aims to enhance class discrimination by more closely clustering samples within classes. The covariance matching constraint seeks to achieve more accurate feature distribution matching between real and synthetic datasets through local feature covariance matrices, particularly beneficial when sample sizes are much smaller than the number of features. Experiments demonstrate notable improvements with these constraints, yielding performance boosts of up to 6.6% on CIFAR10, 2.9% on SVHN, 2.5% on CIFAR100, and 2.5% on TinyImageNet, compared to the state-of-the-art relevant methods. In addition, our method maintains robust performance in cross-architecture settings, with a maximum performance drop of 1.7% on four architectures. Code is available at https://github.com/VincenDen/IID.
Submitted 31 March, 2024; originally announced April 2024.
Comments: Accepted to CVPR 2024
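Read as loss terms, the two constraints described above are easy to write down. The sketch below is a hedged interpretation rather than the authors' released code: a class centralization term that pulls same-class synthetic features toward their class mean, and a covariance matching term between real and synthetic per-class feature covariances.

```python
# Hedged interpretation (not the authors' implementation) of the two constraints
# as per-class loss terms on embedded features. real_f: (N, D) and syn_f: (M, D)
# are feature matrices for one class, extracted by the same embedding network.
import numpy as np

def class_centralization_loss(syn_f):
    """Pull same-class synthetic features toward their class mean."""
    center = syn_f.mean(axis=0, keepdims=True)
    return np.mean(np.sum((syn_f - center) ** 2, axis=1))

def covariance_matching_loss(real_f, syn_f):
    """Match the per-class feature covariances of real and synthetic data."""
    def cov(f):
        c = f - f.mean(axis=0, keepdims=True)
        return c.T @ c / max(len(f) - 1, 1)
    return np.mean((cov(real_f) - cov(syn_f)) ** 2)

# Schematic total objective: the usual mean-feature (distribution matching) loss
# plus these two terms with tunable weights, summed over classes.
```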
arXiv:2403.19322 [pdf, other] cs.CV, cs.CL
Plug-and-Play Grounding of Reasoning in Multimodal Large Language Models
Authors: Jiaxing Chen, Yuxuan Liu, Dehu Li, Xiang An, Weimo Deng, Ziyong Feng, Yongle Zhao, Yin Xie
Abstract: The rise of Multimodal Large Language Models (MLLMs), renowned for their advanced instruction-following and reasoning capabilities, has significantly propelled the field of visual reasoning. However, due to limitations in their image tokenization processes, most MLLMs struggle to capture fine details of text and objects in images, especially in high-resolution samples. To overcome this limitation, we introduce P2G, a novel framework for plug-and-play grounding in MLLMs. P2G utilizes the tool-usage potential of MLLMs to employ expert agents for on-the-fly grounding of reasoning into critical visual and textual elements in images, thereby enabling deliberate reasoning through multimodal prompting. Additionally, we develop P2GB, a benchmark designed to evaluate MLLMs' proficiency in understanding inter-object relationships and textual content in challenging high-resolution images. Extensive experiments on visual reasoning tasks demonstrate the superiority of P2G, achieving performance comparable to GPT-4V on P2GB with a 7B backbone. Our work underscores the potential of grounding reasoning with external agents in MLLMs, presenting a promising alternative to mere model scaling.
Submitted 18 June, 2024; v1 submitted 28 March, 2024; originally announced March 2024.
Comments: 15 pages, 8 figures
arXiv:2403.17752 [pdf, other] cs.CL
Can multiple-choice questions really be useful in detecting the abilities of LLMs?
Authors: Wangyue Li, Liangzhi Li, Tong Xiang, Xiao Liu, Wei Deng, Noa Garcia
Abstract: Multiple-choice questions (MCQs) are widely used in the evaluation of large language models (LLMs) due to their simplicity and efficiency. However, there are concerns about whether MCQs can truly measure LLMs' capabilities, particularly in knowledge-intensive scenarios where long-form generation (LFG) answers are required. The misalignment between the task and the evaluation method demands a thoughtful analysis of MCQs' efficacy, which we undertake in this paper by evaluating nine LLMs on four question-answering (QA) datasets in two languages: Chinese and English. We identify a significant issue: LLMs exhibit an order sensitivity in bilingual MCQs, favoring answers located at specific positions, i.e., the first position. We further quantify the gap between MCQs and long-form generation questions (LFGQs) by comparing their direct outputs, token logits, and embeddings. Our results reveal a relatively low correlation between answers from MCQs and LFGQs for identical questions. Additionally, we propose two methods to quantify the consistency and confidence of LLMs' output, which can be generalized to other QA evaluation benchmarks. Notably, our analysis challenges the idea that the higher the consistency, the greater the accuracy. We also find MCQs to be less reliable than LFGQs in terms of expected calibration error. Finally, the misalignment between MCQs and LFGQs is not only reflected in the evaluation performance but also in the embedding space. Our code and models can be accessed at https://github.com/Meetyou-AI-Lab/Can-MC-Evaluate-LLMs.
Submitted 23 May, 2024; v1 submitted 26 March, 2024; originally announced March 2024.
Comments: LREC-COLING 2024
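Two of the measurements named in the abstract, order sensitivity and expected calibration error, can be probed generically. The sketch below is not the paper's released code; it assumes a hypothetical choose(question, options) callable wrapping an LLM that returns a predicted option index and a confidence in [0, 1].

```python
# Generic probes for the two effects measured in the abstract: position bias in
# MCQs and expected calibration error (ECE). `choose` is a hypothetical callable
# wrapping an LLM; it returns (predicted_option_index, confidence in [0, 1]).
import numpy as np

def position_bias(items, choose, n_options=4):
    """Accuracy when the gold answer is rotated into each option position.
    items: list of (question, gold_answer, distractors) with n_options-1 distractors."""
    acc = np.zeros(n_options)
    for pos in range(n_options):
        hits = 0
        for q, gold, distractors in items:
            options = distractors[:pos] + [gold] + distractors[pos:]
            pred, _ = choose(q, options)
            hits += int(pred == pos)
        acc[pos] = hits / len(items)
    return acc                      # a flat curve indicates no order sensitivity

def expected_calibration_error(confidences, correct, n_bins=10):
    confidences = np.asarray(confidences)
    correct = np.asarray(correct, dtype=float)
    bins = np.minimum((confidences * n_bins).astype(int), n_bins - 1)
    ece = 0.0
    for b in range(n_bins):
        mask = bins == b
        if mask.any():
            ece += mask.mean() * abs(correct[mask].mean() - confidences[mask].mean())
    return ece
```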
arXiv:2403.14760 [pdf, other] cs.CV
Can 3D Vision-Language Models Truly Understand Natural Language?
Authors: Weipeng Deng, Jihan Yang, Runyu Ding, Jiahui Liu, Yijiang Li, Xiaojuan Qi, Edith Ngai
Abstract: Rapid advancements in 3D vision-language (3D-VL) tasks have opened up new avenues for human interaction with embodied agents or robots using natural language. Despite this progress, we find a notable limitation: existing 3D-VL models exhibit sensitivity to the styles of language input, struggling to understand sentences with the same semantic meaning but written in different variants. This observation raises a critical question: Can 3D vision-language models truly understand natural language? To test the language understandability of 3D-VL models, we first propose a language robustness task for systematically assessing 3D-VL models across various tasks, benchmarking their performance when presented with different language style variants. Importantly, these variants are commonly encountered in applications requiring direct interaction with humans, such as embodied robotics, given the diversity and unpredictability of human language. We propose a 3D Language Robustness Dataset, designed based on the characteristics of human language, to facilitate the systematic study of robustness. Our comprehensive evaluation uncovers a significant drop in the performance of all existing models across various 3D-VL tasks. Even the state-of-the-art 3D-LLM fails to understand some variants of the same sentences. Further in-depth analysis suggests that the existing models have a fragile and biased fusion module, which stems from the low diversity of the existing dataset. Finally, we propose a training-free module driven by LLM, which improves language robustness. Datasets and code will be available at github.
Submitted 3 July, 2024; v1 submitted 21 March, 2024; originally announced March 2024.
Comments: https://github.com/VincentDENGP/3D-LR

arXiv:2403.10873 [pdf, other] cs.IT, eess.SP
CSI Transfer From Sub-6G to mmWave: Reduced-Overhead Multi-User Hybrid Beamforming
Authors: Weicao Deng, Min Li, Ming-Min Zhao, Min-Jian Zhao, Osvaldo Simeone
Abstract: Hybrid beamforming is vital in modern wireless systems, especially for massive MIMO and millimeter-wave (mmWave) deployments, offering efficient directional transmission with reduced hardware complexity. However, effective beamforming in multi-user scenarios relies heavily on accurate channel state information, the acquisition of which often requires significant pilot overhead, degrading system performance. To address this, and inspired by the spatial congruence between sub-6GHz (sub-6G) and mmWave channels, we propose a Sub-6G information Aided Multi-User Hybrid Beamforming (SA-MUHBF) framework, avoiding excessive use of pilots at mmWave. SA-MUHBF employs a convolutional neural network to predict the mmWave beamspace from a sub-6G channel estimate, followed by a novel multi-layer graph neural network for analog beam selection and a linear minimum mean-square error algorithm for digital beamforming. Numerical results demonstrate that SA-MUHBF efficiently predicts the mmWave beamspace representation and achieves superior spectrum efficiency over state-of-the-art benchmarks. Moreover, SA-MUHBF demonstrates robust performance across varied sub-6G system configurations and exhibits strong generalization to unseen scenarios.
Submitted 14 November, 2024; v1 submitted 16 March, 2024; originally announced March 2024.
Comments: Accepted by IEEE JSAC NGAT
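The final stage named in the SA-MUHBF abstract, a linear minimum mean-square error (LMMSE) digital beamformer applied after analog beam selection, is a textbook computation. The sketch below shows only that stage for a downlink multi-user setup, with the effective channel, noise variance, and power budget as assumed inputs; it is an illustrative stand-in, not the SA-MUHBF pipeline.

```python
# Textbook LMMSE (regularized zero-forcing) digital precoder for the final stage
# named in the abstract. H_eff: (K users x N_rf) effective channel after analog
# beam selection; sigma2: noise variance; P: total transmit power budget.
# Illustrative stand-in, not the SA-MUHBF implementation.
import numpy as np

def lmmse_precoder(H_eff, sigma2, P):
    K, _ = H_eff.shape
    reg = (K * sigma2 / P) * np.eye(K)
    G = H_eff.conj().T @ np.linalg.inv(H_eff @ H_eff.conj().T + reg)
    return G * np.sqrt(P / np.trace(G @ G.conj().T).real)   # power normalization

rng = np.random.default_rng(0)
H_eff = (rng.standard_normal((4, 8)) + 1j * rng.standard_normal((4, 8))) / np.sqrt(2)
F = lmmse_precoder(H_eff, sigma2=0.1, P=1.0)   # (N_rf x K) digital precoder columns
```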
arXiv:2403.09500 [pdf, other] cs.CV
Faceptor: A Generalist Model for Face Perception
Authors: Lixiong Qin, Mei Wang, Xuannan Liu, Yuhang Zhang, Wei Deng, Xiaoshuai Song, Weiran Xu, Weihong Deng
Abstract: With the comprehensive research conducted on various face analysis tasks, there is a growing interest among researchers to develop a unified approach to face perception. Existing methods mainly discuss unified representation and training, which lack task extensibility and application efficiency. To tackle this issue, we focus on the unified model structure, exploring a face generalist model. As an intuitive design, Naive Faceptor enables tasks with the same output shape and granularity to share the structural design of the standardized output head, achieving improved task extensibility. Furthermore, Faceptor is proposed to adopt a well-designed single-encoder dual-decoder architecture, allowing task-specific queries to represent new-coming semantics. This design enhances the unification of model structure while improving application efficiency in terms of storage overhead. Additionally, we introduce Layer-Attention into Faceptor, enabling the model to adaptively select features from optimal layers to perform the desired tasks. Through joint training on 13 face perception datasets, Faceptor achieves exceptional performance in facial landmark localization, face parsing, age estimation, expression recognition, binary attribute classification, and face recognition, achieving or surpassing specialized methods in most tasks. Our training framework can also be applied to auxiliary supervised learning, significantly improving performance in data-sparse tasks such as age estimation and expression recognition. The code and models will be made publicly available at https://github.com/lxq1000/Faceptor.
Submitted 14 March, 2024; originally announced March 2024.
arXiv:2403.06529 [pdf, other] cs.CV
Confidence-Aware RGB-D Face Recognition via Virtual Depth Synthesis
Authors: Zijian Chen, Mei Wang, Weihong Deng, Hongzhi Shi, Dongchao Wen, Yingjie Zhang, Xingchen Cui, Jian Zhao
Abstract: 2D face recognition encounters challenges in unconstrained environments due to varying illumination, occlusion, and pose. Recent studies focus on RGB-D face recognition to improve robustness by incorporating depth information. However, collecting sufficient paired RGB-D training data is expensive and time-consuming, hindering wide deployment. In this work, we first construct a diverse depth dataset generated by 3D Morphable Models for depth model pre-training. Then, we propose a domain-independent pre-training framework that utilizes readily available pre-trained RGB and depth models to separately perform face recognition without needing additional paired data for retraining. To seamlessly integrate the two distinct networks and harness the complementary benefits of RGB and depth information for improved accuracy, we propose an innovative Adaptive Confidence Weighting (ACW). This mechanism is designed to learn confidence estimates for each modality to achieve modality fusion at the score level. Our method is simple and lightweight, only requiring ACW training beyond the backbone models. Experiments on multiple public RGB-D face recognition benchmarks demonstrate state-of-the-art performance surpassing previous methods based on depth estimation and feature fusion, validating the efficacy of our approach.
Submitted 16 March, 2024; v1 submitted 11 March, 2024; originally announced March 2024.
Comments: 9 pages, 5 figures
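The fusion step described in the abstract, score-level fusion with learned per-modality confidence weights, can be pictured as a very small module. The sketch below is an assumed reading of that idea (learned scalar confidence logits normalized into weights), not the paper's released ACW code.

```python
# Assumed reading (not the authors' code) of confidence-weighted score-level
# fusion: each modality branch predicts a confidence logit from its embedding,
# the logits are softmax-normalized, and the per-modality match scores are blended.
import numpy as np

def softmax(z):
    z = z - z.max()
    e = np.exp(z)
    return e / e.sum()

def fuse_scores(s_rgb, s_depth, conf_logit_rgb, conf_logit_depth):
    """s_*: match scores from each modality; conf_logit_*: learned confidence logits."""
    w_rgb, w_depth = softmax(np.array([conf_logit_rgb, conf_logit_depth]))
    return w_rgb * s_rgb + w_depth * s_depth

# e.g. a blurred RGB frame paired with a clean depth map would receive a lower
# RGB confidence logit, shifting weight toward the depth match score.
fused = fuse_scores(s_rgb=0.62, s_depth=0.81, conf_logit_rgb=-0.3, conf_logit_depth=0.9)
```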