<!-- Saved-page crawl residue (kept as a comment so nothing precedes the
     DOCTYPE except legal comment syntax; stray pre-doctype text is invalid
     and can push browsers into quirks mode):
       source host: CINXE.COM
       page title:  Search | arXiv e-print repository -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;45 of 45 results for author: <span class="mathjax">Zou, P</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Zou%2C+P">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Zou, P"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Zou%2C+P&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Zou, P"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13578">arXiv:2411.13578</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.13578">pdf</a>, <a href="https://arxiv.org/format/2411.13578">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> COOD: Concept-based Zero-shot OOD Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhendong Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Nian%2C+Y">Yi Nian</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+L">Li Li</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+X">Xiyang Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yue Zhao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13578v1-abstract-short" style="display: inline;"> How can models effectively detect out-of-distribution (OOD) samples in complex, multi-label settings without extensive retraining? Existing OOD detection methods struggle to capture the intricate semantic relationships and label co-occurrences inherent in multi-label settings, often requiring large amounts of training data and failing to generalize to unseen label combinations. While large languag&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13578v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13578v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13578v1-abstract-full" style="display: none;"> How can models effectively detect out-of-distribution (OOD) samples in complex, multi-label settings without extensive retraining? Existing OOD detection methods struggle to capture the intricate semantic relationships and label co-occurrences inherent in multi-label settings, often requiring large amounts of training data and failing to generalize to unseen label combinations. While large language models have revolutionized zero-shot OOD detection, they primarily focus on single-label scenarios, leaving a critical gap in handling real-world tasks where samples can be associated with multiple interdependent labels. To address these challenges, we introduce COOD, a novel zero-shot multi-label OOD detection framework. COOD leverages pre-trained vision-language models, enhancing them with a concept-based label expansion strategy and a new scoring function. 
By enriching the semantic space with both positive and negative concepts for each label, our approach models complex label dependencies, precisely differentiating OOD samples without the need for additional training. Extensive experiments demonstrate that our method significantly outperforms existing approaches, achieving approximately 95% average AUROC on both VOC and COCO datasets, while maintaining robust performance across varying numbers of labels and different types of OOD samples. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13578v1-abstract-full').style.display = 'none'; document.getElementById('2411.13578v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11327">arXiv:2410.11327</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.11327">pdf</a>, <a href="https://arxiv.org/format/2410.11327">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Sequential LLM Framework for Fashion Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Han Liu</a>, <a 
href="/search/cs?searchtype=author&amp;query=Tang%2C+X">Xianfeng Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+T">Tianlang Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+J">Jiapeng Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Indu%2C+I">Indu Indu</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+P">Peng Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Galan%2C+R+F">Roberto Fernandez Galan</a>, <a href="/search/cs?searchtype=author&amp;query=Porter%2C+M+D">Michael D Porter</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+D">Dongmei Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+N">Ning Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Xiong%2C+L">Lian Xiong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11327v1-abstract-short" style="display: inline;"> The fashion industry is one of the leading domains in the global e-commerce sector, prompting major online retailers to employ recommendation systems for product suggestions and customer convenience. While recommendation systems have been widely studied, most are designed for general e-commerce problems and struggle with the unique challenges of the fashion domain. 
To address these issues, we prop&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11327v1-abstract-full').style.display = 'inline'; document.getElementById('2410.11327v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11327v1-abstract-full" style="display: none;"> The fashion industry is one of the leading domains in the global e-commerce sector, prompting major online retailers to employ recommendation systems for product suggestions and customer convenience. While recommendation systems have been widely studied, most are designed for general e-commerce problems and struggle with the unique challenges of the fashion domain. To address these issues, we propose a sequential fashion recommendation framework that leverages a pre-trained large language model (LLM) enhanced with recommendation-specific prompts. Our framework employs parameter-efficient fine-tuning with extensive fashion data and introduces a novel mix-up-based retrieval technique for translating text into relevant product suggestions. Extensive experiments show our proposed framework significantly enhances fashion recommendation performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11327v1-abstract-full').style.display = 'none'; document.getElementById('2410.11327v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.12139">arXiv:2409.12139</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.12139">pdf</a>, <a href="https://arxiv.org/format/2409.12139">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Takin: A Cohort of Superior Quality Zero-shot Speech Generation Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+S">Sijing Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+Y">Yuan Feng</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+L">Laipeng He</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+T">Tianwei He</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+W">Wendi He</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Y">Yanni Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+B">Bin Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+Y">Yiting Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+Y">Yu Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+P">Pengfei Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+C">Chengwei Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhicheng Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+R">Ruoye Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+J">Jixun Yao</a>, <a 
href="/search/cs?searchtype=author&amp;query=Yan%2C+Q">Quanlei Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yuguang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+J">Jianhao Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+J">Jingjing Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+Y">Yanzhen Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Huimin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+X">Xiang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+G">Guangcheng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+H">Hongbin Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pengpeng Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.12139v3-abstract-short" style="display: inline;"> With the advent of the big data and large language model era, zero-shot personalized rapid customization has emerged as a significant trend. In this report, we introduce Takin AudioLLM, a series of techniques and models, mainly including Takin TTS, Takin VC, and Takin Morphing, specifically designed for audiobook production. These models are capable of zero-shot speech production, generating high-&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.12139v3-abstract-full').style.display = 'inline'; document.getElementById('2409.12139v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.12139v3-abstract-full" style="display: none;"> With the advent of the big data and large language model era, zero-shot personalized rapid customization has emerged as a significant trend. 
In this report, we introduce Takin AudioLLM, a series of techniques and models, mainly including Takin TTS, Takin VC, and Takin Morphing, specifically designed for audiobook production. These models are capable of zero-shot speech production, generating high-quality speech that is nearly indistinguishable from real human speech and facilitating individuals to customize the speech content according to their own needs. Specifically, we first introduce Takin TTS, a neural codec language model that builds upon an enhanced neural speech codec and a multi-task training framework, capable of generating high-fidelity natural speech in a zero-shot way. For Takin VC, we advocate an effective content and timbre joint modeling approach to improve the speaker similarity, while advocating for a conditional flow matching based decoder to further enhance its naturalness and expressiveness. Last, we propose the Takin Morphing system with highly decoupled and advanced timbre and prosody modeling approaches, which enables individuals to customize speech production with their preferred timbre and prosody in a precise and controllable manner. Extensive experiments validate the effectiveness and robustness of our Takin AudioLLM series models. For detailed demos, please refer to https://everest-ai.github.io/takinaudiollm/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.12139v3-abstract-full').style.display = 'none'; document.getElementById('2409.12139v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Technical Report; 18 pages; typos corrected, references added, demo url modified, author name modified;</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.09927">arXiv:2409.09927</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.09927">pdf</a>, <a href="https://arxiv.org/format/2409.09927">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Towards Data Contamination Detection for Modern Large Language Models: Limitations, Inconsistencies, and Oracle Challenges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Samuel%2C+V">Vinay Samuel</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.09927v1-abstract-short" style="display: inline;"> As large language models achieve increasingly impressive results, questions arise about whether such performance is from generalizability or mere data memorization. Thus, numerous data contamination detection methods have been proposed. 
However, these approaches are often validated with traditional benchmarks and early-stage LLMs, leaving uncertainty about their effectiveness when evaluating state&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09927v1-abstract-full').style.display = 'inline'; document.getElementById('2409.09927v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.09927v1-abstract-full" style="display: none;"> As large language models achieve increasingly impressive results, questions arise about whether such performance is from generalizability or mere data memorization. Thus, numerous data contamination detection methods have been proposed. However, these approaches are often validated with traditional benchmarks and early-stage LLMs, leaving uncertainty about their effectiveness when evaluating state-of-the-art LLMs on the contamination of more challenging benchmarks. To address this gap and provide a dual investigation of SOTA LLM contamination status and detection method robustness, we evaluate five contamination detection approaches with four state-of-the-art LLMs across eight challenging datasets often used in modern LLM evaluation. Our analysis reveals that (1) Current methods have non-trivial limitations in their assumptions and practical applications; (2) Notable difficulties exist in detecting contamination introduced during instruction fine-tuning with answer augmentation; and (3) Limited consistencies between SOTA contamination detection techniques. These findings highlight the complexity of contamination detection in advanced LLMs and the urgent need for further research on robust and generalizable contamination evaluation. Our code is available at https://github.com/vsamuel2003/data-contamination. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09927v1-abstract-full').style.display = 'none'; document.getElementById('2409.09927v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 1 figure</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.09214">arXiv:2409.09214</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.09214">pdf</a>, <a href="https://arxiv.org/format/2409.09214">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Seed-Music: A Unified Framework for High Quality and Controlled Music Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bai%2C+Y">Ye Bai</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Haonan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jitong Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Z">Zhuo Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+Y">Yi Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+X">Xiaohong Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Hantrakul%2C+L">Lamtharn Hantrakul</a>, <a href="/search/cs?searchtype=author&amp;query=Hao%2C+W">Weituo Hao</a>, <a 
href="/search/cs?searchtype=author&amp;query=Huang%2C+Q">Qingqing Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Z">Zhongyi Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+D">Dongya Jia</a>, <a href="/search/cs?searchtype=author&amp;query=La%2C+F">Feihu La</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+D">Duc Le</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+B">Bochen Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+C">Chumin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+H">Hui Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xingxing Li</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shouda Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+W">Wei-Tsung Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+Y">Yiqing Lu</a>, <a href="/search/cs?searchtype=author&amp;query=Shaw%2C+A">Andrew Shaw</a>, <a href="/search/cs?searchtype=author&amp;query=Spijkervet%2C+J">Janne Spijkervet</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+Y">Yakun Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+B">Bo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Ju-Chiang Wang</a> , et al. (13 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.09214v3-abstract-short" style="display: inline;"> We introduce Seed-Music, a suite of music generation systems capable of producing high-quality music with fine-grained style control. Our unified framework leverages both auto-regressive language modeling and diffusion approaches to support two key music creation workflows: controlled music generation and post-production editing. 
For controlled music generation, our system enables vocal music gene&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09214v3-abstract-full').style.display = 'inline'; document.getElementById('2409.09214v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.09214v3-abstract-full" style="display: none;"> We introduce Seed-Music, a suite of music generation systems capable of producing high-quality music with fine-grained style control. Our unified framework leverages both auto-regressive language modeling and diffusion approaches to support two key music creation workflows: controlled music generation and post-production editing. For controlled music generation, our system enables vocal music generation with performance controls from multi-modal inputs, including style descriptions, audio references, musical scores, and voice prompts. For post-production editing, it offers interactive tools for editing lyrics and vocal melodies directly in the generated audio. We encourage readers to listen to demo audio examples at https://team.doubao.com/seed-music &#34;https://team.doubao.com/seed-music&#34;. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.09214v3-abstract-full').style.display = 'none'; document.getElementById('2409.09214v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Seed-Music technical report, 20 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.03055">arXiv:2409.03055</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.03055">pdf</a>, <a href="https://arxiv.org/format/2409.03055">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> SymPAC: Scalable Symbolic Music Generation With Prompts And Constraints </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+H">Haonan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Smith%2C+J+B+L">Jordan B. L. Smith</a>, <a href="/search/cs?searchtype=author&amp;query=Spijkervet%2C+J">Janne Spijkervet</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+J">Ju-Chiang Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pei Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+B">Bochen Li</a>, <a href="/search/cs?searchtype=author&amp;query=Kong%2C+Q">Qiuqiang Kong</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+X">Xingjian Du</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.03055v2-abstract-short" style="display: inline;"> Progress in the task of symbolic music generation may be lagging behind other tasks like audio and text generation, in part because of the scarcity of symbolic training data. 
In this paper, we leverage the greater scale of audio music data by applying pre-trained MIR models (for transcription, beat tracking, structure analysis, etc.) to extract symbolic events and encode them into token sequences.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.03055v2-abstract-full').style.display = 'inline'; document.getElementById('2409.03055v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.03055v2-abstract-full" style="display: none;"> Progress in the task of symbolic music generation may be lagging behind other tasks like audio and text generation, in part because of the scarcity of symbolic training data. In this paper, we leverage the greater scale of audio music data by applying pre-trained MIR models (for transcription, beat tracking, structure analysis, etc.) to extract symbolic events and encode them into token sequences. To the best of our knowledge, this work is the first to demonstrate the feasibility of training symbolic generation models solely from auto-transcribed audio data. Furthermore, to enhance the controllability of the trained model, we introduce SymPAC (Symbolic Music Language Model with Prompting And Constrained Generation), which is distinguished by using (a) prompt bars in encoding and (b) a technique called Constrained Generation via Finite State Machines (FSMs) during inference time. We show the flexibility and controllability of this approach, which may be critical in making music AI useful to creators and users. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.03055v2-abstract-full').style.display = 'none'; document.getElementById('2409.03055v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ISMIR 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.18910">arXiv:2407.18910</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.18910">pdf</a>, <a href="https://arxiv.org/format/2407.18910">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Do We Really Need Graph Convolution During Training? 
Light Post-Training Graph-ODE for Efficient Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Weizhi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+L">Liangwei Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Z">Zihe Song</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+K">Ke Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+L">Liancheng Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+P+S">Philip S. Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.18910v2-abstract-short" style="display: inline;"> The efficiency and scalability of graph convolution networks (GCNs) in training recommender systems (RecSys) have been persistent concerns, hindering their deployment in real-world applications. This paper presents a critical examination of the necessity of graph convolutions during the training phase and introduces an innovative alternative: the Light Post-Training Graph Ordinary-Differential-Equ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.18910v2-abstract-full').style.display = 'inline'; document.getElementById('2407.18910v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.18910v2-abstract-full" style="display: none;"> The efficiency and scalability of graph convolution networks (GCNs) in training recommender systems (RecSys) have been persistent concerns, hindering their deployment in real-world applications. 
This paper presents a critical examination of the necessity of graph convolutions during the training phase and introduces an innovative alternative: the Light Post-Training Graph Ordinary-Differential-Equation (LightGODE). Our investigation reveals that the benefits of GCNs are more pronounced during testing rather than training. Motivated by this, LightGODE utilizes a novel post-training graph convolution method that bypasses the computation-intensive message passing of GCNs and employs a non-parametric continuous graph ordinary-differential-equation (ODE) to dynamically model node representations. This approach drastically reduces training time while achieving fine-grained post-training graph convolution to avoid the distortion of the original training embedding space, termed the embedding discrepancy issue. We validate our model across several real-world datasets of different scales, demonstrating that LightGODE not only outperforms GCN-based models in terms of efficiency and effectiveness but also significantly mitigates the embedding discrepancy commonly associated with deeper graph convolution layers. Our LightGODE challenges the prevailing paradigms in RecSys training and suggests re-evaluating the role of graph convolutions, potentially guiding future developments of efficient large-scale graph-based RecSys. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.18910v2-abstract-full').style.display = 'none'; document.getElementById('2407.18910v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to CIKM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.18416">arXiv:2407.18416</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.18416">pdf</a>, <a href="https://arxiv.org/format/2407.18416">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> PersonaGym: Evaluating Persona Agents and LLMs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Samuel%2C+V">Vinay Samuel</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Chaudhari%2C+S">Shreyas Chaudhari</a>, <a href="/search/cs?searchtype=author&amp;query=Kalyan%2C+A">Ashwin Kalyan</a>, <a href="/search/cs?searchtype=author&amp;query=Rajpurohit%2C+T">Tanmay Rajpurohit</a>, <a href="/search/cs?searchtype=author&amp;query=Deshpande%2C+A">Ameet Deshpande</a>, <a href="/search/cs?searchtype=author&amp;query=Narasimhan%2C+K">Karthik Narasimhan</a>, <a href="/search/cs?searchtype=author&amp;query=Murahari%2C+V">Vishvak Murahari</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.18416v2-abstract-short" style="display: inline;"> Persona agents, which are 
LLM agents that act according to an assigned persona, have demonstrated impressive contextual response capabilities across various applications. These persona agents offer significant enhancements across diverse sectors, such as education, healthcare, and entertainment, where model developers can align agent responses to different user requirements thereby broadening the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.18416v2-abstract-full').style.display = 'inline'; document.getElementById('2407.18416v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.18416v2-abstract-full" style="display: none;"> Persona agents, which are LLM agents that act according to an assigned persona, have demonstrated impressive contextual response capabilities across various applications. These persona agents offer significant enhancements across diverse sectors, such as education, healthcare, and entertainment, where model developers can align agent responses to different user requirements thereby broadening the scope of agent applications. However, evaluating persona agent performance is incredibly challenging due to the complexity of assessing persona adherence in free-form interactions across various environments that are relevant to each persona agent. We introduce PersonaGym, the first dynamic evaluation framework for assessing persona agents, and PersonaScore, the first automated human-aligned metric grounded in decision theory for comprehensive large-scale evaluation of persona agents. Our evaluation of 6 open and closed-source LLMs, using a benchmark encompassing 200 personas and 10,000 questions, reveals significant opportunities for advancement in persona agent capabilities across state-of-the-art models. For example, Claude 3.5 Sonnet only has a 2.97% relative improvement in PersonaScore than GPT 3.5 despite being a much more advanced model. 
Importantly, we find that increased model size and complexity do not necessarily imply enhanced persona agent capabilities thereby highlighting the pressing need for algorithmic and architectural invention towards faithful and performant persona agents. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.18416v2-abstract-full').style.display = 'none'; document.getElementById('2407.18416v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.12037">arXiv:2407.12037</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.12037">pdf</a>, <a href="https://arxiv.org/format/2407.12037">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> A Novel HDL Code Generator for Effectively Testing FPGA Logic Synthesis Compilers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Z">Zhihao Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+S">Shikai Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+G">Guilin Zhao</a>, <a 
href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peiyu Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X">Xiaochen Li</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+H">He Jiang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.12037v1-abstract-short" style="display: inline;"> Field Programmable Gate Array (FPGA) logic synthesis compilers (e.g., Vivado, Iverilog, Yosys, and Quartus) are widely applied in Electronic Design Automation (EDA), such as the development of FPGA programs. However, defects (i.e., incorrect synthesis) in logic synthesis compilers may lead to unexpected behaviors in target applications, posing security risks. Therefore, it is crucial to thoroughly&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.12037v1-abstract-full').style.display = 'inline'; document.getElementById('2407.12037v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.12037v1-abstract-full" style="display: none;"> Field Programmable Gate Array (FPGA) logic synthesis compilers (e.g., Vivado, Iverilog, Yosys, and Quartus) are widely applied in Electronic Design Automation (EDA), such as the development of FPGA programs. However, defects (i.e., incorrect synthesis) in logic synthesis compilers may lead to unexpected behaviors in target applications, posing security risks.
Therefore, it is crucial to thoroughly test logic synthesis compilers to eliminate such defects. Although several Hardware Design Language (HDL) code generators (e.g., Verismith) have been proposed to find defects in logic synthesis compilers, the effectiveness of these generators is still limited by the simple code generation strategy and the monogeneity of the generated HDL code. This paper proposes LegoHDL, a novel method to generate syntax valid HDL code for comprehensively testing FPGA logic synthesis compilers. LegoHDL can generate more complex and diverse defect-trigger HDL code (e.g., Verilog, VHDL, and SystemVerilog) by leveraging the guidance of abstract syntax tree and the extensive function block libraries of cyber-physical systems. Extensive experiments show that the diversity and defect-trigger capability of HDL code generated by LegoHDL are significantly better than the state-of-the-art method (i.e., Verismith). In three months, LegoHDL has reported 20 new defects--many of which are deep and important; 16 of them have been confirmed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.12037v1-abstract-full').style.display = 'none'; document.getElementById('2407.12037v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024.
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.05721">arXiv:2407.05721</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.05721">pdf</a>, <a href="https://arxiv.org/format/2407.05721">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> PsycoLLM: Enhancing LLM for Psychological Understanding and Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hu%2C+J">Jinpeng Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+T">Tengteng Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Gang%2C+L">Luo Gang</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+H">Hui Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xiao Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+D">Dan Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+M">Meng Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.05721v2-abstract-short" style="display: inline;"> Mental health has attracted substantial attention in recent years and LLM can be an effective technology for alleviating this problem owing to its capability in text understanding and dialogue. However, existing research in this domain often suffers from limitations, such as training on datasets lacking crucial prior knowledge and evidence, and the absence of comprehensive evaluation methods. 
In t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.05721v2-abstract-full').style.display = 'inline'; document.getElementById('2407.05721v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.05721v2-abstract-full" style="display: none;"> Mental health has attracted substantial attention in recent years and LLM can be an effective technology for alleviating this problem owing to its capability in text understanding and dialogue. However, existing research in this domain often suffers from limitations, such as training on datasets lacking crucial prior knowledge and evidence, and the absence of comprehensive evaluation methods. In this paper, we propose a specialized psychological large language model (LLM), named PsycoLLM, trained on a proposed high-quality psychological dataset, including single-turn QA, multi-turn dialogues and knowledge-based QA. Specifically, we construct multi-turn dialogues through a three-step pipeline comprising generation, evidence judgment, and refinement. We augment this process with real-world psychological case backgrounds extracted from online platforms, enhancing the relevance and applicability of the generated data. Additionally, to compare the performance of PsycoLLM with other LLMs, we develop a comprehensive psychological benchmark based on authoritative psychological counseling examinations in China, which includes assessments of professional ethics, theoretical proficiency, and case analysis. The experimental results on the benchmark illustrates the effectiveness of PsycoLLM, which demonstrates superior performance compared to other LLMs. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.05721v2-abstract-full').style.display = 'none'; document.getElementById('2407.05721v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">work in progress</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.00869">arXiv:2407.00869</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.00869">pdf</a>, <a href="https://arxiv.org/format/2407.00869">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Large Language Models Are Involuntary Truth-Tellers: Exploiting Fallacy Failure for Jailbreak Attacks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Di+Eugenio%2C+B">Barbara Di Eugenio</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yang Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2407.00869v2-abstract-short" style="display: inline;"> We find that language models have difficulties generating fallacious and deceptive reasoning. When asked to generate deceptive outputs, language models tend to leak honest counterparts but believe them to be false. Exploiting this deficiency, we propose a jailbreak attack method that elicits an aligned language model for malicious output. Specifically, we query the model to generate a fallacious y&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.00869v2-abstract-full').style.display = 'inline'; document.getElementById('2407.00869v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.00869v2-abstract-full" style="display: none;"> We find that language models have difficulties generating fallacious and deceptive reasoning. When asked to generate deceptive outputs, language models tend to leak honest counterparts but believe them to be false. Exploiting this deficiency, we propose a jailbreak attack method that elicits an aligned language model for malicious output. Specifically, we query the model to generate a fallacious yet deceptively real procedure for the harmful behavior. Since a fallacious procedure is generally considered fake and thus harmless by LLMs, it helps bypass the safeguard mechanism. Yet the output is factually harmful since the LLM cannot fabricate fallacious solutions but proposes truthful ones. We evaluate our approach over five safety-aligned large language models, comparing four previous jailbreak methods, and show that our approach achieves competitive performance with more harmful outputs. We believe the findings could be extended beyond model safety, such as self-verification and hallucination. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.00869v2-abstract-full').style.display = 'none'; document.getElementById('2407.00869v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to the main conference of EMNLP 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.16253">arXiv:2406.16253</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.16253">pdf</a>, <a href="https://arxiv.org/format/2406.16253">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> LLMs Assist NLP Researchers: Critique Paper (Meta-)Reviewing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Du%2C+J">Jiangshu Du</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yibo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+W">Wenting Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Deng%2C+Z">Zhongfen Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shuaiqi Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Lou%2C+R">Renze Lou</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Venkit%2C+P+N">Pranav Narayanan 
Venkit</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+N">Nan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Srinath%2C+M">Mukund Srinath</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H+R">Haoran Ranran Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+V">Vipul Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yinghui Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+T">Tao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+F">Fei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Q">Qin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+T">Tianlin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+P">Pengzhi Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Xia%2C+C">Congying Xia</a>, <a href="/search/cs?searchtype=author&amp;query=Xing%2C+C">Chen Xing</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+J">Jiayang Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhaowei Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Su%2C+Y">Ying Su</a>, <a href="/search/cs?searchtype=author&amp;query=Shah%2C+R+S">Raj Sanjay Shah</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+R">Ruohao Guo</a> , et al. (15 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.16253v3-abstract-short" style="display: inline;"> This work is motivated by two key trends. On one hand, large language models (LLMs) have shown remarkable versatility in various generative tasks such as writing, drawing, and question answering, significantly reducing the time required for many routine tasks. 
On the other hand, researchers, whose work is not only time-consuming but also highly expertise-demanding, face increasing challenges as th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.16253v3-abstract-full').style.display = 'inline'; document.getElementById('2406.16253v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.16253v3-abstract-full" style="display: none;"> This work is motivated by two key trends. On one hand, large language models (LLMs) have shown remarkable versatility in various generative tasks such as writing, drawing, and question answering, significantly reducing the time required for many routine tasks. On the other hand, researchers, whose work is not only time-consuming but also highly expertise-demanding, face increasing challenges as they have to spend more time reading, writing, and reviewing papers. This raises the question: how can LLMs potentially assist researchers in alleviating their heavy workload? This study focuses on the topic of LLMs assist NLP Researchers, particularly examining the effectiveness of LLM in assisting paper (meta-)reviewing and its recognizability. To address this, we constructed the ReviewCritique dataset, which includes two types of information: (i) NLP papers (initial submissions rather than camera-ready) with both human-written and LLM-generated reviews, and (ii) each review comes with &#34;deficiency&#34; labels and corresponding explanations for individual segments, annotated by experts. Using ReviewCritique, this study explores two threads of research questions: (i) &#34;LLMs as Reviewers&#34;, how do reviews generated by LLMs compare with those written by humans in terms of quality and distinguishability? 
(ii) &#34;LLMs as Metareviewers&#34;, how effectively can LLMs identify potential issues, such as Deficient or unprofessional review segments, within individual paper reviews? To our knowledge, this is the first work to provide such a comprehensive analysis. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.16253v3-abstract-full').style.display = 'none'; document.getElementById('2406.16253v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by EMNLP 2024 main conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.05392">arXiv:2406.05392</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.05392">pdf</a>, <a href="https://arxiv.org/format/2406.05392">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Deconstructing The Ethics of Large Language Models from Long-standing Issues to New-emerging Dilemmas: A Survey </p> <p class="authors"> <span 
class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Deng%2C+C">Chengyuan Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yiqun Duan</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+X">Xin Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+H">Heng Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+Y">Yijun Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Han Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yichen Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+K">Kuofeng Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+Y">Yiqiao Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Xiao%2C+Y">Yijia Xiao</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+S">Shenghao Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Xie%2C+Z">Zongxing Xie</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+W">Weimin Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+S">Sihong He</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+L">Lu Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H">Haohan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhuang%2C+J">Jun Zhuang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.05392v2-abstract-short" style="display: inline;"> Large Language Models (LLMs) have achieved unparalleled success across diverse language modeling tasks in recent years. However, this progress has also intensified ethical concerns, impacting the deployment of LLMs in everyday contexts. 
This paper provides a comprehensive survey of ethical challenges associated with LLMs, from longstanding issues such as copyright infringement, systematic bias, an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.05392v2-abstract-full').style.display = 'inline'; document.getElementById('2406.05392v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.05392v2-abstract-full" style="display: none;"> Large Language Models (LLMs) have achieved unparalleled success across diverse language modeling tasks in recent years. However, this progress has also intensified ethical concerns, impacting the deployment of LLMs in everyday contexts. This paper provides a comprehensive survey of ethical challenges associated with LLMs, from longstanding issues such as copyright infringement, systematic bias, and data privacy, to emerging problems like truthfulness and social norms. We critically analyze existing research aimed at understanding, examining, and mitigating these ethical risks. Our survey underscores integrating ethical standards and societal values into the development of LLMs, thereby guiding the development of responsible and ethically aligned language models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.05392v2-abstract-full').style.display = 'none'; document.getElementById('2406.05392v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.15954">arXiv:2404.15954</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.15954">pdf</a>, <a href="https://arxiv.org/format/2404.15954">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Mixed Supervised Graph Contrastive Learning for Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Weizhi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+L">Liangwei Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Z">Zihe Song</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+K">Ke Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+Y">Yuanjie Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+P+S">Philip S. Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.15954v2-abstract-short" style="display: inline;"> Recommender systems (RecSys) play a vital role in online platforms, offering users personalized suggestions amidst vast information. 
Graph contrastive learning aims to learn from high-order collaborative filtering signals with unsupervised augmentation on the user-item bipartite graph, which predominantly relies on the multi-task learning framework involving both the pair-wise recommendation loss&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15954v2-abstract-full').style.display = 'inline'; document.getElementById('2404.15954v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.15954v2-abstract-full" style="display: none;"> Recommender systems (RecSys) play a vital role in online platforms, offering users personalized suggestions amidst vast information. Graph contrastive learning aims to learn from high-order collaborative filtering signals with unsupervised augmentation on the user-item bipartite graph, which predominantly relies on the multi-task learning framework involving both the pair-wise recommendation loss and the contrastive loss. This decoupled design can cause inconsistent optimization direction from different losses, which leads to longer convergence time and even sub-optimal performance. Besides, the self-supervised contrastive loss falls short in alleviating the data sparsity issue in RecSys as it learns to differentiate users/items from different views without providing extra supervised collaborative filtering signals during augmentations. In this paper, we propose Mixed Supervised Graph Contrastive Learning for Recommendation (MixSGCL) to address these concerns. MixSGCL originally integrates the training of recommendation and unsupervised contrastive losses into a supervised contrastive learning loss to align the two tasks within one optimization direction. 
To cope with the data sparsity issue, instead of unsupervised augmentation, we further propose node-wise and edge-wise mixup to mine more direct supervised collaborative filtering signals based on existing user-item interactions. Extensive experiments on three real-world datasets demonstrate that MixSGCL surpasses state-of-the-art methods, achieving top performance on both accuracy and efficiency. It validates the effectiveness of MixSGCL with our coupled design on supervised graph contrastive learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15954v2-abstract-full').style.display = 'none'; document.getElementById('2404.15954v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.15592">arXiv:2404.15592</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.15592">pdf</a>, <a href="https://arxiv.org/format/2404.15592">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> ImplicitAVE: An Open-Source Dataset and Multimodal LLMs Benchmark for Implicit Attribute Value Extraction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Samuel%2C+V">Vinay Samuel</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Weizhi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+L">Liancheng Fang</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Z">Zihe Song</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+P+S">Philip S. 
Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Caragea%2C+C">Cornelia Caragea</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.15592v2-abstract-short" style="display: inline;"> Existing datasets for attribute value extraction (AVE) predominantly focus on explicit attribute values while neglecting the implicit ones, lack product images, are often not publicly available, and lack an in-depth human inspection across diverse domains. To address these limitations, we present ImplicitAVE, the first, publicly available multimodal dataset for implicit attribute value extraction.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15592v2-abstract-full').style.display = 'inline'; document.getElementById('2404.15592v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.15592v2-abstract-full" style="display: none;"> Existing datasets for attribute value extraction (AVE) predominantly focus on explicit attribute values while neglecting the implicit ones, lack product images, are often not publicly available, and lack an in-depth human inspection across diverse domains. To address these limitations, we present ImplicitAVE, the first, publicly available multimodal dataset for implicit attribute value extraction. ImplicitAVE, sourced from the MAVE dataset, is carefully curated and expanded to include implicit AVE and multimodality, resulting in a refined dataset of 68k training and 1.6k testing data across five domains. We also explore the application of multimodal large language models (MLLMs) to implicit AVE, establishing a comprehensive benchmark for MLLMs on the ImplicitAVE dataset. 
Six recent MLLMs with eleven variants are evaluated across diverse settings, revealing that implicit value extraction remains a challenging task for MLLMs. The contributions of this work include the development and release of ImplicitAVE, and the exploration and benchmarking of various MLLMs for implicit AVE, providing valuable insights and potential future research directions. Dataset and code are available at https://github.com/HenryPengZou/ImplicitAVE <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15592v2-abstract-full').style.display = 'none'; document.getElementById('2404.15592v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ACL 2024 (Findings) - Scores: Soundness - 4/4/4, Dataset - 4/4/4, Overall Assessment - 4/3.5/3.5, Meta - 4</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.08886">arXiv:2404.08886</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.08886">pdf</a>, <a href="https://arxiv.org/format/2404.08886">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> EIVEN: Efficient Implicit Attribute Value Extraction using Multimodal LLM </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+G+H">Gavin Heqing Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+Z">Ziwei Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Bu%2C+D">Dan Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+H">Han Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+P">Peng Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+D">Dongmei Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Caragea%2C+C">Cornelia Caragea</a> </p> <p class="abstract mathjax"> 
<span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.08886v1-abstract-short" style="display: inline;"> In e-commerce, accurately extracting product attribute values from multimodal data is crucial for improving user experience and operational efficiency of retailers. However, previous approaches to multimodal attribute value extraction often struggle with implicit attribute values embedded in images or text, rely heavily on extensive labeled data, and can easily confuse similar attribute values. To&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.08886v1-abstract-full').style.display = 'inline'; document.getElementById('2404.08886v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.08886v1-abstract-full" style="display: none;"> In e-commerce, accurately extracting product attribute values from multimodal data is crucial for improving user experience and operational efficiency of retailers. However, previous approaches to multimodal attribute value extraction often struggle with implicit attribute values embedded in images or text, rely heavily on extensive labeled data, and can easily confuse similar attribute values. To address these issues, we introduce EIVEN, a data- and parameter-efficient generative framework that pioneers the use of multimodal LLM for implicit attribute value extraction. EIVEN leverages the rich inherent knowledge of a pre-trained LLM and vision encoder to reduce reliance on labeled data. We also introduce a novel Learning-by-Comparison technique to reduce model confusion by enforcing attribute value comparison and difference identification. Additionally, we construct initial open-source datasets for multimodal implicit attribute value extraction. 
Our extensive experiments reveal that EIVEN significantly outperforms existing methods in extracting implicit attribute values while requiring less labeled data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.08886v1-abstract-full').style.display = 'none'; document.getElementById('2404.08886v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by NAACL 2024 Industry Track</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.08638">arXiv:2404.08638</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.08638">pdf</a>, <a href="https://arxiv.org/format/2404.08638">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Age of Information Optimization and State Error Analysis for Correlated Multi-Process Multi-Sensor Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Erbayat%2C+E">Egemen Erbayat</a>, <a href="/search/cs?searchtype=author&amp;query=Maatouk%2C+A">Ali Maatouk</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2404.08638v4-abstract-short" style="display: inline;"> In this paper, we examine a multi-sensor system where each sensor may monitor more than one time-varying information process and send status updates to a remote monitor over a common channel. We consider that each sensor&#39;s status update may contain information about more than one information process in the system subject to the system&#39;s constraints. To investigate the impact of this correlation on&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.08638v4-abstract-full').style.display = 'inline'; document.getElementById('2404.08638v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.08638v4-abstract-full" style="display: none;"> In this paper, we examine a multi-sensor system where each sensor may monitor more than one time-varying information process and send status updates to a remote monitor over a common channel. We consider that each sensor&#39;s status update may contain information about more than one information process in the system subject to the system&#39;s constraints. To investigate the impact of this correlation on the overall system&#39;s performance, we conduct an analysis of both the average Age of Information (AoI) and source state estimation error at the monitor. Building upon this analysis, we subsequently explore the impact of the packet arrivals, correlation probabilities, and rate of processes&#39; state change on the system&#39;s performance. Next, we consider the case where sensors have limited sensing abilities and distribute a portion of their sensing abilities across the different processes. We optimize this distribution to minimize the total AoI of the system. Interestingly, we show that monitoring multiple processes from a single source may not always be beneficial. 
Our results also reveal that the optimal sensing distribution for diverse arrival rates may exhibit a rapid regime switch, rather than smooth transitions, after crossing critical system values. This highlights the importance of identifying these critical thresholds to ensure effective system performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.08638v4-abstract-full').style.display = 'none'; document.getElementById('2404.08638v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.17785">arXiv:2402.17785</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.17785">pdf</a>, <a href="https://arxiv.org/format/2402.17785">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> ByteComposer: a Human-like Melody Composition Method based on Language Model Agent </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liang%2C+X">Xia Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Du%2C+X">Xingjian Du</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+J">Jiaju Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pei 
Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Wan%2C+Y">Yuan Wan</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+B">Bilei Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.17785v2-abstract-short" style="display: inline;"> Large Language Models (LLM) have shown encouraging progress in multimodal understanding and generation tasks. However, how to design a human-aligned and interpretable melody composition system is still under-explored. To solve this problem, we propose ByteComposer, an agent framework emulating a human&#39;s creative pipeline in four separate steps : &#34;Conception Analysis - Draft Composition - Self-Eval&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.17785v2-abstract-full').style.display = 'inline'; document.getElementById('2402.17785v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.17785v2-abstract-full" style="display: none;"> Large Language Models (LLM) have shown encouraging progress in multimodal understanding and generation tasks. However, how to design a human-aligned and interpretable melody composition system is still under-explored. To solve this problem, we propose ByteComposer, an agent framework emulating a human&#39;s creative pipeline in four separate steps : &#34;Conception Analysis - Draft Composition - Self-Evaluation and Modification - Aesthetic Selection&#34;. This framework seamlessly blends the interactive and knowledge-understanding features of LLMs with existing symbolic music generation models, thereby achieving a melody composition agent comparable to human creators. We conduct extensive experiments on GPT4 and several open-source large language models, which substantiate our framework&#39;s effectiveness. 
Furthermore, professional music composers were engaged in multi-dimensional evaluations; the final results demonstrated that across various facets of music composition, ByteComposer agent attains the level of a novice melody composer. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.17785v2-abstract-full').style.display = 'none'; document.getElementById('2402.17785v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.14627">arXiv:2310.14627</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.14627">pdf</a>, <a href="https://arxiv.org/format/2310.14627">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> CrisisMatch: Semi-Supervised Few-Shot Learning for Fine-Grained Disaster Tweet Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Caragea%2C+C">Cornelia Caragea</a>, <a href="/search/cs?searchtype=author&amp;query=Caragea%2C+D">Doina Caragea</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span 
class="abstract-short has-text-grey-dark mathjax" id="2310.14627v1-abstract-short" style="display: inline;"> The shared real-time information about natural disasters on social media platforms like Twitter and Facebook plays a critical role in informing volunteers, emergency managers, and response organizations. However, supervised learning models for monitoring disaster events require large amounts of annotated data, making them unrealistic for real-time use in disaster events. To address this challenge,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.14627v1-abstract-full').style.display = 'inline'; document.getElementById('2310.14627v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.14627v1-abstract-full" style="display: none;"> The shared real-time information about natural disasters on social media platforms like Twitter and Facebook plays a critical role in informing volunteers, emergency managers, and response organizations. However, supervised learning models for monitoring disaster events require large amounts of annotated data, making them unrealistic for real-time use in disaster events. To address this challenge, we present a fine-grained disaster tweet classification model under the semi-supervised, few-shot learning setting where only a small number of annotated data is required. Our model, CrisisMatch, effectively classifies tweets into fine-grained classes of interest using few labeled data and large amounts of unlabeled data, mimicking the early stage of a disaster. Through integrating effective semi-supervised learning ideas and incorporating TextMixUp, CrisisMatch achieves performance improvement on two disaster datasets of 11.2% on average. Further analyses are also provided for the influence of the number of labeled data and out-of-domain results. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.14627v1-abstract-full').style.display = 'none'; document.getElementById('2310.14627v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ISCRAM 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.14583">arXiv:2310.14583</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.14583">pdf</a>, <a href="https://arxiv.org/format/2310.14583">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> JointMatch: A Unified Approach for Diverse and Collaborative Pseudo-Labeling to Semi-Supervised Text Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Caragea%2C+C">Cornelia Caragea</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.14583v1-abstract-short" style="display: inline;"> Semi-supervised text classification (SSTC) has gained increasing attention due to its ability to leverage unlabeled data. 
However, existing approaches based on pseudo-labeling suffer from the issues of pseudo-label bias and error accumulation. In this paper, we propose JointMatch, a holistic approach for SSTC that addresses these challenges by unifying ideas from recent semi-supervised learning an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.14583v1-abstract-full').style.display = 'inline'; document.getElementById('2310.14583v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.14583v1-abstract-full" style="display: none;"> Semi-supervised text classification (SSTC) has gained increasing attention due to its ability to leverage unlabeled data. However, existing approaches based on pseudo-labeling suffer from the issues of pseudo-label bias and error accumulation. In this paper, we propose JointMatch, a holistic approach for SSTC that addresses these challenges by unifying ideas from recent semi-supervised learning and the task of learning with noise. JointMatch adaptively adjusts classwise thresholds based on the learning status of different classes to mitigate model bias towards current easy classes. Additionally, JointMatch alleviates error accumulation by utilizing two differently initialized networks to teach each other in a cross-labeling manner. To maintain divergence between the two networks for mutual learning, we introduce a strategy that weighs more disagreement data while also allowing the utilization of high-quality agreement data for training. Experimental results on benchmark datasets demonstrate the superior performance of JointMatch, achieving a significant 5.13% improvement on average. Notably, JointMatch delivers impressive results even in the extremely-scarce-label setting, obtaining 86% accuracy on AG News with only 5 labels per class. We make our code available at https://github.com/HenryPengZou/JointMatch. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.14583v1-abstract-full').style.display = 'none'; document.getElementById('2310.14583v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by EMNLP 2023 (Main)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.14577">arXiv:2310.14577</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.14577">pdf</a>, <a href="https://arxiv.org/format/2310.14577">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DeCrisisMB: Debiased Semi-Supervised Learning for Crisis Tweet Classification via Memory Bank </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+H+P">Henry Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yue Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W">Weizhi Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Caragea%2C+C">Cornelia Caragea</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.14577v1-abstract-short" style="display: inline;"> During crisis events, people often use social 
media platforms such as Twitter to disseminate information about the situation, warnings, advice, and support. Emergency relief organizations leverage such information to acquire timely crisis circumstances and expedite rescue operations. While existing works utilize such information to build models for crisis event analysis, fully-supervised approache&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.14577v1-abstract-full').style.display = 'inline'; document.getElementById('2310.14577v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.14577v1-abstract-full" style="display: none;"> During crisis events, people often use social media platforms such as Twitter to disseminate information about the situation, warnings, advice, and support. Emergency relief organizations leverage such information to acquire timely crisis circumstances and expedite rescue operations. While existing works utilize such information to build models for crisis event analysis, fully-supervised approaches require annotating vast amounts of data and are impractical due to limited response time. On the other hand, semi-supervised models can be biased, performing moderately well for certain classes while performing extremely poorly for others, resulting in substantially negative effects on disaster monitoring and rescue. In this paper, we first study two recent debiasing methods on semi-supervised crisis tweet classification. Then we propose a simple but effective debiasing method, DeCrisisMB, that utilizes a Memory Bank to store and perform equal sampling for generated pseudo-labels from each class at each training iteration. Extensive experiments are conducted to compare different debiasing methods&#39; performance and generalization ability in both in-distribution and out-of-distribution settings. 
The results demonstrate the superior performance of our proposed method. Our code is available at https://github.com/HenryPengZou/DeCrisisMB. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.14577v1-abstract-full').style.display = 'none'; document.getElementById('2310.14577v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by EMNLP 2023 (Findings)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.02593">arXiv:2310.02593</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.02593">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A ModelOps-based Framework for Intelligent Medical Knowledge Extraction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ding%2C+H">Hongxin Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peinie Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhiyuan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+J">Junfeng Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yasha Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Q">Qiang Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2310.02593v1-abstract-short" style="display: inline;"> Extracting medical knowledge from healthcare texts enhances downstream tasks like medical knowledge graph construction and clinical decision-making. However, the construction and application of knowledge extraction models lack automation, reusability and unified management, leading to inefficiencies for researchers and high barriers for non-AI experts such as doctors, to utilize knowledge extracti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02593v1-abstract-full').style.display = 'inline'; document.getElementById('2310.02593v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.02593v1-abstract-full" style="display: none;"> Extracting medical knowledge from healthcare texts enhances downstream tasks like medical knowledge graph construction and clinical decision-making. However, the construction and application of knowledge extraction models lack automation, reusability and unified management, leading to inefficiencies for researchers and high barriers for non-AI experts such as doctors, to utilize knowledge extraction. To address these issues, we propose a ModelOps-based intelligent medical knowledge extraction framework that offers a low-code system for model selection, training, evaluation and optimization. Specifically, the framework includes a dataset abstraction mechanism based on multi-layer callback functions, a reusable model training, monitoring and management mechanism. We also propose a model recommendation method based on dataset similarity, which helps users quickly find potentially suitable models for a given dataset. Our framework provides convenience for researchers to develop models and simplifies model access for non-AI experts such as doctors. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02593v1-abstract-full').style.display = 'none'; document.getElementById('2310.02593v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.16247">arXiv:2309.16247</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.16247">pdf</a>, <a href="https://arxiv.org/format/2309.16247">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> PP-MeT: a Real-world Personalized Prompt based Meeting Transcription System </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+X">Xiang Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+Y">Yuhang Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Q">Qing Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yin%2C+J">Jingjing Yin</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Y">Yuguang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pengpeng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+Y">Yanni Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Lu%2C+H">Heng Lu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.16247v1-abstract-short" 
style="display: inline;"> Speaker-attributed automatic speech recognition (SA-ASR) improves the accuracy and applicability of multi-speaker ASR systems in real-world scenarios by assigning speaker labels to transcribed texts. However, SA-ASR poses unique challenges due to factors such as speaker overlap, speaker variability, background noise, and reverberation. In this study, we propose PP-MeT system, a real-world personal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.16247v1-abstract-full').style.display = 'inline'; document.getElementById('2309.16247v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.16247v1-abstract-full" style="display: none;"> Speaker-attributed automatic speech recognition (SA-ASR) improves the accuracy and applicability of multi-speaker ASR systems in real-world scenarios by assigning speaker labels to transcribed texts. However, SA-ASR poses unique challenges due to factors such as speaker overlap, speaker variability, background noise, and reverberation. In this study, we propose PP-MeT system, a real-world personalized prompt based meeting transcription system, which consists of a clustering system, target-speaker voice activity detection (TS-VAD), and TS-ASR. Specifically, we utilize target-speaker embedding as a prompt in TS-VAD and TS-ASR modules in our proposed system. In contrast with previous systems, we fully leverage pre-trained models for system initialization, thereby bestowing our approach with heightened generalizability and precision. Experiments on M2MeT2.0 Challenge dataset show that our system achieves a cp-CER of 11.27% on the test set, ranking first in both fixed and open training conditions. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.16247v1-abstract-full').style.display = 'none'; document.getElementById('2309.16247v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.12256">arXiv:2304.12256</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.12256">pdf</a>, <a href="https://arxiv.org/format/2304.12256">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> How Costly Was That (In)Decision? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Maatouk%2C+A">Ali Maatouk</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.12256v1-abstract-short" style="display: inline;"> In this paper, we introduce a new metric, named Penalty upon Decision (PuD), for measuring the impact of communication delays and state changes at the source on a remote decision maker. Specifically, the metric quantifies the performance degradation at the decision maker&#39;s side due to delayed, erroneous, and (possibly) missed decisions. 
We clarify the rationale for the metric and derive closed-for&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.12256v1-abstract-full').style.display = 'inline'; document.getElementById('2304.12256v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.12256v1-abstract-full" style="display: none;"> In this paper, we introduce a new metric, named Penalty upon Decision (PuD), for measuring the impact of communication delays and state changes at the source on a remote decision maker. Specifically, the metric quantifies the performance degradation at the decision maker&#39;s side due to delayed, erroneous, and (possibly) missed decisions. We clarify the rationale for the metric and derive closed-form expressions for its average in M/GI/1 and M/GI/1/1 with blocking settings. Numerical results are then presented to support our expressions and to compare the infinite and zero buffer regimes. Interestingly, comparing these two settings sheds light on a buffer length design challenge that is essential to minimize the average PuD. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.12256v1-abstract-full').style.display = 'none'; document.getElementById('2304.12256v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.10561">arXiv:2303.10561</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.10561">pdf</a>, <a href="https://arxiv.org/format/2303.10561">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Spatial-temporal Transformer for Affective Behavior Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Rui Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Wen%2C+K">Kehua Wen</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+Y">Yasi Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xiao Sun</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.10561v1-abstract-short" style="display: inline;"> The in-the-wild affective behavior analysis has been an important study. In this paper, we submit our solutions for the 5th Workshop and Competition on Affective Behavior Analysis in-the-wild (ABAW), which includes V-A Estimation, Facial Expression Classification and AU Detection Sub-challenges. 
We propose a Transformer Encoder with Multi-Head Attention framework to learn the distribution of both&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.10561v1-abstract-full').style.display = 'inline'; document.getElementById('2303.10561v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.10561v1-abstract-full" style="display: none;"> The in-the-wild affective behavior analysis has been an important study. In this paper, we submit our solutions for the 5th Workshop and Competition on Affective Behavior Analysis in-the-wild (ABAW), which includes V-A Estimation, Facial Expression Classification and AU Detection Sub-challenges. We propose a Transformer Encoder with Multi-Head Attention framework to learn the distribution of both the spatial and temporal features. Besides, there are various effective data augmentation strategies employed to alleviate the problems of sample imbalance during model training. The results fully demonstrate the effectiveness of our proposed model based on the Aff-Wild2 dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.10561v1-abstract-full').style.display = 'none'; document.getElementById('2303.10561v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.03051">arXiv:2208.03051</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.03051">pdf</a>, <a href="https://arxiv.org/format/2208.03051">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Hybrid Multimodal Feature Extraction, Mining and Fusion for Sentiment Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+J">Jia Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Z">Ziyang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Lang%2C+J">Junjie Lang</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+Y">Yueqi Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=An%2C+L">Liuwei An</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Yangyang Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+S">Sheng Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+J">Jie Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+C">Chunxiao Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+X">Xiao Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+M">Meng Wang</a> </p> <p class="abstract 
mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.03051v2-abstract-short" style="display: inline;"> In this paper, we present our solutions for the Multimodal Sentiment Analysis Challenge (MuSe) 2022, which includes MuSe-Humor, MuSe-Reaction and MuSe-Stress Sub-challenges. The MuSe 2022 focuses on humor detection, emotional reactions and multimodal emotional stress utilizing different modalities and data sets. In our work, different kinds of multimodal features are extracted, including acoustic,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.03051v2-abstract-full').style.display = 'inline'; document.getElementById('2208.03051v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.03051v2-abstract-full" style="display: none;"> In this paper, we present our solutions for the Multimodal Sentiment Analysis Challenge (MuSe) 2022, which includes MuSe-Humor, MuSe-Reaction and MuSe-Stress Sub-challenges. The MuSe 2022 focuses on humor detection, emotional reactions and multimodal emotional stress utilizing different modalities and data sets. In our work, different kinds of multimodal features are extracted, including acoustic, visual, text and biological features. These features are fused by TEMMA and GRU with self-attention mechanism frameworks. In this paper, 1) several new audio features, facial expression features and paragraph-level text embeddings are extracted for accuracy improvement. 2) we substantially improve the accuracy and reliability of multimodal sentiment prediction by mining and blending the multimodal features. 3) effective data augmentation strategies are applied in model training to alleviate the problem of sample imbalance and prevent the model from learning biased subject characters. 
For the MuSe-Humor sub-challenge, our model obtains the AUC score of 0.8932. For the MuSe-Reaction sub-challenge, the Pearson&#39;s Correlations Coefficient of our approach on the test set is 0.3879, which outperforms all other participants. For the MuSe-Stress sub-challenge, our approach outperforms the baseline in both arousal and valence on the test dataset, reaching a final combined result of 0.5151. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.03051v2-abstract-full').style.display = 'none'; document.getElementById('2208.03051v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 2 figures, to appear in MuSe 2022 (ACM MM2022 co-located workshop)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.08224">arXiv:2206.08224</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.08224">pdf</a>, <a href="https://arxiv.org/format/2206.08224">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Multi scale Feature Extraction and Fusion for Online Knowledge Distillation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Panpan Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Teng%2C+Y">Yinglei 
Teng</a>, <a href="/search/cs?searchtype=author&amp;query=Niu%2C+T">Tao Niu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.08224v1-abstract-short" style="display: inline;"> Online knowledge distillation conducts knowledge transfer among all student models to alleviate the reliance on pre-trained models. However, existing online methods rely heavily on the prediction distributions and neglect the further exploration of the representational knowledge. In this paper, we propose a novel Multi-scale Feature Extraction and Fusion method (MFEF) for online knowledge distilla&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.08224v1-abstract-full').style.display = 'inline'; document.getElementById('2206.08224v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.08224v1-abstract-full" style="display: none;"> Online knowledge distillation conducts knowledge transfer among all student models to alleviate the reliance on pre-trained models. However, existing online methods rely heavily on the prediction distributions and neglect the further exploration of the representational knowledge. In this paper, we propose a novel Multi-scale Feature Extraction and Fusion method (MFEF) for online knowledge distillation, which comprises three key components: Multi-scale Feature Extraction, Dual-attention and Feature Fusion, towards generating more informative feature maps for distillation. The multiscale feature extraction exploiting divide-and-concatenate in channel dimension is proposed to improve the multi-scale representation ability of feature maps. To obtain more accurate information, we design a dual-attention to strengthen the important channel and spatial regions adaptively. 
Moreover, we aggregate and fuse the former processed feature maps via feature fusion to assist the training of student models. Extensive experiments on CIFAR-10, CIFAR-100, and CINIC-10 show that MFEF transfers more beneficial representational knowledge for distillation and outperforms alternative methods among various network architectures. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.08224v1-abstract-full').style.display = 'none'; document.getElementById('2206.08224v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.08186">arXiv:2206.08186</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.08186">pdf</a>, <a href="https://arxiv.org/format/2206.08186">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Asymptotic Soft Cluster Pruning for Deep Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Niu%2C+T">Tao Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Teng%2C+Y">Yinglei Teng</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Panpan Zou</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.08186v1-abstract-short" style="display: inline;"> Filter pruning method introduces structural sparsity by removing selected filters and is thus particularly effective for reducing complexity. Previous works empirically prune networks from the point of view that filter with smaller norm contributes less to the final results. However, such criteria has been proven sensitive to the distribution of filters, and the accuracy may be hard to recover since&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.08186v1-abstract-full').style.display = 'inline'; document.getElementById('2206.08186v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.08186v1-abstract-full" style="display: none;"> Filter pruning method introduces structural sparsity by removing selected filters and is thus particularly effective for reducing complexity. Previous works empirically prune networks from the point of view that filter with smaller norm contributes less to the final results. However, such criteria has been proven sensitive to the distribution of filters, and the accuracy may be hard to recover since the capacity gap is fixed once pruned. In this paper, we propose a novel filter pruning method called Asymptotic Soft Cluster Pruning (ASCP), to identify the redundancy of network based on the similarity of filters. Each filter from over-parameterized network is first distinguished by clustering, and then reconstructed to manually introduce redundancy into it. Several guidelines of clustering are proposed to better preserve feature extraction ability. After reconstruction, filters are allowed to be updated to eliminate the effect caused by mistakenly selected. 
Besides, various decaying strategies of the pruning rate are adopted to stabilize the pruning process and improve the final performance as well. By gradually generating more identical filters within each cluster, ASCP can remove them through channel addition operation with almost no accuracy drop. Extensive experiments on CIFAR-10 and ImageNet datasets show that our method can achieve competitive results compared with many state-of-the-art algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.08186v1-abstract-full').style.display = 'none'; document.getElementById('2206.08186v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.12081">arXiv:2202.12081</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.12081">pdf</a>, <a href="https://arxiv.org/format/2202.12081">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Community Trend Prediction on Heterogeneous Graph in E-commerce </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+J">Jiahao Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pengcheng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+X">Xuan Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+J">Jinwei Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Ji%2C+W">Wendi Ji</a>, <a 
href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xiaoling Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.12081v1-abstract-short" style="display: inline;"> In online shopping, ever-changing fashion trends make merchants need to prepare more differentiated products to meet the diversified demands, and e-commerce platforms need to capture the market trend with a prophetic vision. For the trend prediction, the attribute tags, as the essential description of items, can genuinely reflect the decision basis of consumers. However, few existing works explore&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.12081v1-abstract-full').style.display = 'inline'; document.getElementById('2202.12081v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.12081v1-abstract-full" style="display: none;"> In online shopping, ever-changing fashion trends make merchants need to prepare more differentiated products to meet the diversified demands, and e-commerce platforms need to capture the market trend with a prophetic vision. For the trend prediction, the attribute tags, as the essential description of items, can genuinely reflect the decision basis of consumers. However, few existing works explore the attribute trend in the specific community for e-commerce. In this paper, we focus on the community trend prediction on the item attribute and propose a unified framework that combines the dynamic evolution of two graph patterns to predict the attribute trend in a specific community. Specifically, we first design a community-attribute bipartite graph at each time step to learn the collaboration of different communities. 
Next, we transform the bipartite graph into a hypergraph to exploit the associations of different attribute tags in one community. Lastly, we introduce a dynamic evolution component based on the recurrent neural networks to capture the fashion trend of attribute tags. Extensive experiments on three real-world datasets in a large e-commerce platform show the superiority of the proposed approach over several strong alternatives and demonstrate the ability to discover the community trend in advance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.12081v1-abstract-full').style.display = 'none'; document.getElementById('2202.12081v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published as a full paper at WSDM 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.02968">arXiv:2201.02968</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.02968">pdf</a>, <a href="https://arxiv.org/format/2201.02968">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> An Adaptive Device-Edge Co-Inference Framework Based on Soft Actor-Critic </p> <p class="authors"> <span 
class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Niu%2C+T">Tao Niu</a>, <a href="/search/cs?searchtype=author&amp;query=Teng%2C+Y">Yinglei Teng</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Panpan Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.02968v1-abstract-short" style="display: inline;"> Recently, the applications of deep neural network (DNN) have been very prominent in many fields such as computer vision (CV) and natural language processing (NLP) due to its superior feature extraction performance. However, the high-dimension parameter model and large-scale mathematical calculation restrict the execution efficiency, especially for Internet of Things (IoT) devices. Different from t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.02968v1-abstract-full').style.display = 'inline'; document.getElementById('2201.02968v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.02968v1-abstract-full" style="display: none;"> Recently, the applications of deep neural network (DNN) have been very prominent in many fields such as computer vision (CV) and natural language processing (NLP) due to its superior feature extraction performance. However, the high-dimension parameter model and large-scale mathematical calculation restrict the execution efficiency, especially for Internet of Things (IoT) devices. 
Different from the previous cloud/edge-only pattern that brings huge pressure for uplink communication and device-only fashion that undertakes unaffordable calculation strength, we highlight the collaborative computation between the device and edge for DNN models, which can achieve a good balance between the communication load and execution accuracy. Specifically, a systematic on-demand co-inference framework is proposed to exploit the multi-branch structure, in which the pre-trained Alexnet is right-sized through \emph{early-exit} and partitioned at an intermediate DNN layer. The integer quantization is enforced to further compress transmission bits. As a result, we establish a new Deep Reinforcement Learning (DRL) optimizer-Soft Actor Critic for discrete (SAC-d), which generates the \emph{exit point}, \emph{partition point}, and \emph{compressing bits} by soft policy iterations. Based on the latency and accuracy aware reward design, such an optimizer can well adapt to the complex environment like dynamic wireless channel and arbitrary CPU processing, and is capable of supporting the 5G URLLC. Real-world experiment on Raspberry Pi 4 and PC shows the outperformance of the proposed solution. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.02968v1-abstract-full').style.display = 'none'; document.getElementById('2201.02968v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.05725">arXiv:2112.05725</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.05725">pdf</a>, <a href="https://arxiv.org/ps/2112.05725">ps</a>, <a href="https://arxiv.org/format/2112.05725">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Beyond the Longest Letter-duplicated Subsequence Problem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lai%2C+W">Wenfeng Lai</a>, <a href="/search/cs?searchtype=author&amp;query=Liyanage%2C+A">Adiesha Liyanage</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+B">Binhai Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.05725v2-abstract-short" style="display: inline;"> Given a sequence $S$ of length $n$, a letter-duplicated subsequence is a subsequence of $S$ in the form of $x_1^{d_1}x_2^{d_2}\cdots x_k^{d_k}$ with $x_i\in\Sigma$, $x_j\neq x_{j+1}$ and $d_i\geq 2$ for all $i$ in $[k]$ and $j$ in $[k-1]$. A linear time algorithm for computing the longest letter-duplicated subsequence (LLDS) of $S$ can be easily obtained. 
In this paper, we focus on two variants of this&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05725v2-abstract-full').style.display = 'inline'; document.getElementById('2112.05725v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.05725v2-abstract-full" style="display: none;"> Given a sequence $S$ of length $n$, a letter-duplicated subsequence is a subsequence of $S$ in the form of $x_1^{d_1}x_2^{d_2}\cdots x_k^{d_k}$ with $x_i\in\Sigma$, $x_j\neq x_{j+1}$ and $d_i\geq 2$ for all $i$ in $[k]$ and $j$ in $[k-1]$. A linear time algorithm for computing the longest letter-duplicated subsequence (LLDS) of $S$ can be easily obtained. In this paper, we focus on two variants of this problem. We first consider the constrained version when $\Sigma$ is unbounded, each letter appears in $S$ at least 6 times and all the letters in $\Sigma$ must appear in the solution. We show that the problem is NP-hard (a further twist indicates that the problem does not admit any polynomial time approximation). The reduction is from possibly the simplest version of SAT that is NP-complete, $(\leq 2,1,\leq 3)$-SAT, where each variable appears at most twice positively and exact once negatively, and each clause contains at most three literals and some clauses must contain exactly two literals. (We hope that this technique will serve as a general tool to help us proving the NP-hardness for some more tricky sequence problems involving only one sequence -- much harder than with at least two input sequences, which we apply successfully at the end of the paper on some extra variations of the LLDS problem.) We then show that when each letter appears in $S$ at most 3 times, then the problem admits a factor $1.5-O(\frac{1}{n})$ approximation. 
Finally, we consider the weighted version, where the weight of a block $x_i^{d_i} (d_i\geq 2)$ could be any positive function which might not grow with $d_i$. We give a non-trivial $O(n^2)$ time dynamic programming algorithm for this version, i.e., computing an LD-subsequence of $S$ whose weight is maximized. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.05725v2-abstract-full').style.display = 'none'; document.getElementById('2112.05725v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68W01; 68W32 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.05020">arXiv:2110.05020</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.05020">pdf</a>, <a href="https://arxiv.org/format/2110.05020">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> MELONS: generating melody with long-term structure using transformers and structure graph </p> <p class="authors"> <span 
class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+Y">Yi Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pei Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhao%2C+Y">Yi Zhao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+K">Kaixiang Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+R">Ran Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+X">Xiaorui Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.05020v3-abstract-short" style="display: inline;"> The creation of long melody sequences requires effective expression of coherent musical structure. However, there is no clear representation of musical structure. Recent works on music generation have suggested various approaches to deal with the structural information of music, but generating a full-song melody with clear long-term structure remains a challenge. In this paper, we propose MELONS,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.05020v3-abstract-full').style.display = 'inline'; document.getElementById('2110.05020v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.05020v3-abstract-full" style="display: none;"> The creation of long melody sequences requires effective expression of coherent musical structure. However, there is no clear representation of musical structure. Recent works on music generation have suggested various approaches to deal with the structural information of music, but generating a full-song melody with clear long-term structure remains a challenge. 
In this paper, we propose MELONS, a melody generation framework based on a graph representation of music structure which consists of eight types of bar-level relations. MELONS adopts a multi-step generation method with transformer-based networks by factoring melody generation into two sub-problems: structure generation and structure conditional melody generation. Experimental results show that MELONS can produce structured melodies with high quality and rich contents. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.05020v3-abstract-full').style.display = 'none'; document.getElementById('2110.05020v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.14062">arXiv:2109.14062</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.14062">pdf</a>, <a href="https://arxiv.org/ps/2109.14062">ps</a>, <a href="https://arxiv.org/format/2109.14062">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Overage and Staleness Metrics for Status Update Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jin Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+X">Xianglin Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.14062v2-abstract-short" style="display: inline;"> Status update systems consist of sensors that take measurements of a physical parameter and transmit them to a remote receiver. Age of Information (AoI) has been studied extensively as a metric for the freshness of information in such systems with and without an enforced hard or soft deadline. 
In this paper, we propose three metrics for status update systems to measure the ability of different que&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.14062v2-abstract-full').style.display = 'inline'; document.getElementById('2109.14062v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.14062v2-abstract-full" style="display: none;"> Status update systems consist of sensors that take measurements of a physical parameter and transmit them to a remote receiver. Age of Information (AoI) has been studied extensively as a metric for the freshness of information in such systems with and without an enforced hard or soft deadline. In this paper, we propose three metrics for status update systems to measure the ability of different queuing systems to meet a threshold requirement for the AoI. The {\em overage probability} is defined as the probability that the age of the most recent update packet held by the receiver is larger than the threshold. The {\em stale update probability} is the probability that an update is stale, i.e., its age has exceeded the deadline, when it is delivered to the receiver. Finally, the {\em average overage} is defined as the time average of the overage (i.e., age beyond the threshold), and is a measure of the average ``staleness&#39;&#39; of the update packets held by the receiver. We investigate these metrics in three typical status update queuing systems -- M/G/1/1, M/G/1/$2^*$, and M/M/1. Numerical results show the performances for these metrics under different parameter settings and different service distributions. The differences between the average overage and average AoI are also shown. Our results demonstrate that a lower bound exists for the stale update probability when the buffer size is limited. 
Further, we observe that the overage probability decreases and the stale update probability increases as the update arrival rate increases. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.14062v2-abstract-full').style.display = 'none'; document.getElementById('2109.14062v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.04166">arXiv:2011.04166</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2011.04166">pdf</a>, <a href="https://arxiv.org/format/2011.04166">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Distant Supervision for E-commerce Query Segmentation via Attention Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+D">Donghui Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pengcheng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+Y">Yu Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+X">Xi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Ji Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Gao%2C+J">Jianliang Gao</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Y">Youxi Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Duan%2C+Y">Yucong 
Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.04166v1-abstract-short" style="display: inline;"> The booming online e-commerce platforms demand highly accurate approaches to segment queries that carry the product requirements of consumers. Recent works have shown that the supervised methods, especially those based on deep learning, are attractive for achieving better performance on the problem of query segmentation. However, the lack of labeled data is still a big challenge for training a dee&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.04166v1-abstract-full').style.display = 'inline'; document.getElementById('2011.04166v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.04166v1-abstract-full" style="display: none;"> The booming online e-commerce platforms demand highly accurate approaches to segment queries that carry the product requirements of consumers. Recent works have shown that the supervised methods, especially those based on deep learning, are attractive for achieving better performance on the problem of query segmentation. However, the lack of labeled data is still a big challenge for training a deep segmentation network, and the problem of Out-of-Vocabulary (OOV) also adversely impacts the performance of query segmentation. Different from query segmentation task in an open domain, e-commerce scenario can provide external documents that are closely related to these queries. Thus, to deal with the two challenges, we employ the idea of distant supervision and design a novel method to find contexts in external documents and extract features from these contexts. 
In this work, we propose a BiLSTM-CRF based model with an attention module to encode external features, such that external contexts information, which can be utilized naturally and effectively to help query segmentation. Experiments on two datasets show the effectiveness of our approach compared with several kinds of baselines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.04166v1-abstract-full').style.display = 'none'; document.getElementById('2011.04166v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.14069">arXiv:2003.14069</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.14069">pdf</a>, <a href="https://arxiv.org/ps/2003.14069">ps</a>, <a href="https://arxiv.org/format/2003.14069">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> On Age and Value of Information in Status Update Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Ozel%2C+O">Omur Ozel</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.14069v1-abstract-short" 
style="display: inline;"> Motivated by the inherent value of packets arising in many cyber-physical applications (e.g., due to precision of the information content or an alarm message), we consider status update systems with update packets carrying values as well as their generation time stamps. Once generated, a status update packet has a random initial value and a deterministic deadline after which it is not useful (ulti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.14069v1-abstract-full').style.display = 'inline'; document.getElementById('2003.14069v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.14069v1-abstract-full" style="display: none;"> Motivated by the inherent value of packets arising in many cyber-physical applications (e.g., due to precision of the information content or an alarm message), we consider status update systems with update packets carrying values as well as their generation time stamps. Once generated, a status update packet has a random initial value and a deterministic deadline after which it is not useful (ultimate staleness). In our model, value of a packet decreases in time (even after reception) starting from its generation to ultimate staleness when it vanishes. The value of information (VoI) at the receiver is additive in that the VoI is the sum of the current values of all packets held by the receiver. We investigate various queuing disciplines under potential dependence between value and service time and provide closed form expressions for average VoI at the receiver. Numerical results illustrate the average VoI for different scenarios and the contrast between average age of information (AoI) and average VoI. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.14069v1-abstract-full').style.display = 'none'; document.getElementById('2003.14069v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.13577">arXiv:2003.13577</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.13577">pdf</a>, <a href="https://arxiv.org/ps/2003.13577">ps</a>, <a href="https://arxiv.org/format/2003.13577">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Maintaining Information Freshness in Power-Efficient Status Update Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Rafiee%2C+P">Parisa Rafiee</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Ozel%2C+O">Omur Ozel</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.13577v1-abstract-short" style="display: inline;"> This paper is motivated by emerging edge computing systems which consist of sensor nodes that acquire and process information and then transmit status updates to an edge receiver for possible 
further processing. As power is a scarce resource at the sensor nodes, the system is modeled as a tandem computation-transmission queue with power-efficient computing. Jobs arrive at the computation server wi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.13577v1-abstract-full').style.display = 'inline'; document.getElementById('2003.13577v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.13577v1-abstract-full" style="display: none;"> This paper is motivated by emerging edge computing systems which consist of sensor nodes that acquire and process information and then transmit status updates to an edge receiver for possible further processing. As power is a scarce resource at the sensor nodes, the system is modeled as a tandem computation-transmission queue with power-efficient computing. Jobs arrive at the computation server with rate $\lambda$ as a Poisson process with no available data buffer. The computation server can be in one of three states: (i) OFF: the server is turned off and no jobs are observed or processed, (ii) ON-Idle: the server is turned on but there is no job in the server, (iii) ON-Busy: the server is turned on and a job is processed in the server. These states cost zero, one and $p_c$ units of power, respectively. Under a long-term power constraint, the computation server switches from one state to another in sequence: first a deterministic $T_o$ time units in OFF state, then waiting for a job arrival in ON-Idle state and then in ON-Busy state for an independent identically distributed compute time duration. The transmission server has a single unit data buffer to save incoming packets and applies last come first serve with discarding as well as a packet deadline to discard a sitting packet for maintaining information freshness, which is measured by the Age of Information (AoI). 
Additionally, there is a monotonic functional relation between the mean time spent in ON-Busy state and the mean transmission time. We obtain closed-form expressions for average AoI and average peak AoI. Our numerical results illustrate various regimes of operation for best AoI performances optimized over packet deadlines with relation to power efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.13577v1-abstract-full').style.display = 'none'; document.getElementById('2003.13577v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.04778">arXiv:2002.04778</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2002.04778">pdf</a>, <a href="https://arxiv.org/format/2002.04778">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Genomic Problems Involving Copy Number Profiles: Complexity and Algorithms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lafond%2C+M">Manuel Lafond</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+B">Binhai Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.04778v1-abstract-short" 
style="display: inline;"> Recently, due to the genomic sequence analysis in several types of cancer, the genomic data based on {\em copy number profiles} ({\em CNP} for short) are getting more and more popular. A CNP is a vector where each component is a non-negative integer representing the number of copies of a specific gene or segment of interest. In this paper, we present two streams of results. The first is the nega&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.04778v1-abstract-full').style.display = 'inline'; document.getElementById('2002.04778v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.04778v1-abstract-full" style="display: none;"> Recently, due to the genomic sequence analysis in several types of cancer, the genomic data based on {\em copy number profiles} ({\em CNP} for short) are getting more and more popular. A CNP is a vector where each component is a non-negative integer representing the number of copies of a specific gene or segment of interest. In this paper, we present two streams of results. The first is the negative results on two open problems regarding the computational complexity of the Minimum Copy Number Generation (MCNG) problem posed by Qingge et al. in 2018. It was shown by Qingge et al. that the problem is NP-hard if the duplications are tandem and they left the open question of whether the problem remains NP-hard if arbitrary duplications are used. We answer this question affirmatively in this paper; in fact, we prove that it is NP-hard to even obtain a constant factor approximation. We also prove that the parameterized version is W[1]-hard, answering another open question by Qingge et al. The other result is positive and is based on a new (and more general) problem regarding CNP&#39;s. 
The \emph{Copy Number Profile Conforming (CNPC)} problem is formally defined as follows: given two CNP&#39;s $C_1$ and $C_2$, compute two strings $S_1$ and $S_2$ with $cnp(S_1)=C_1$ and $cnp(S_2)=C_2$ such that the distance between $S_1$ and $S_2$, $d(S_1,S_2)$, is minimized. Here, $d(S_1,S_2)$ is a very general term, which means it could be any genome rearrangement distance (like reversal, transposition, and tandem duplication, etc). We make the first step by showing that if $d(S_1,S_2)$ is measured by the breakpoint distance then the problem is polynomially solvable. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.04778v1-abstract-full').style.display = 'none'; document.getElementById('2002.04778v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 3 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> F.2.2; J.3 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1912.02692">arXiv:1912.02692</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1912.02692">pdf</a>, <a href="https://arxiv.org/ps/1912.02692">ps</a>, <a href="https://arxiv.org/format/1912.02692">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Optimizing Information Freshness Through Computation-Transmission Tradeoff and Queue Management in Edge Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Ozel%2C+O">Omur Ozel</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1912.02692v1-abstract-short" style="display: inline;"> Edge computing applications typically require generated data to be preprocessed at the source and then transmitted to an edge server. In such cases, transmission time and preprocessing time are coupled, yielding a tradeoff between them to achieve the targeted objective. 
This paper presents analysis of such a system with the objective of optimizing freshness of received data at the edge server. We&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.02692v1-abstract-full').style.display = 'inline'; document.getElementById('1912.02692v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1912.02692v1-abstract-full" style="display: none;"> Edge computing applications typically require generated data to be preprocessed at the source and then transmitted to an edge server. In such cases, transmission time and preprocessing time are coupled, yielding a tradeoff between them to achieve the targeted objective. This paper presents analysis of such a system with the objective of optimizing freshness of received data at the edge server. We model this system as two queues in tandem whose service times are independent over time but the transmission service time is monotonically dependent on the computation service time in mean value. This dependence captures the natural decrease in transmission time due to lower offloaded computation. We analyze various queue management schemes in this tandem queue where the first queue has a single server, Poisson packet arrivals, general independent service and no extra buffer to save incoming status update packets. The second queue has a single server receiving packets from the first queue and service is memoryless. We consider the second queue in two forms: (i) No data buffer and (ii) One unit data buffer and last come first serve with discarding. We analyze various non-preemptive as well as preemptive cases. We perform stationary distribution analysis and obtain closed form expressions for average age of information (AoI) and average peak AoI. 
Our numerical results illustrate analytical findings on how computation and transmission times could be traded off to optimize AoI and reveal a consequent tradeoff between average AoI and average peak AoI. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.02692v1-abstract-full').style.display = 'none'; document.getElementById('1912.02692v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 December, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:1907.00928</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.00928">arXiv:1907.00928</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.00928">pdf</a>, <a href="https://arxiv.org/ps/1907.00928">ps</a>, <a href="https://arxiv.org/format/1907.00928">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Trading Off Computation with Transmission in Status Update Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Ozel%2C+O">Omur Ozel</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.00928v1-abstract-short" style="display: inline;"> This paper is motivated by emerging edge computing applications in which generated data are pre-processed at the source and then transmitted to an edge server. In such a scenario, there is typically a tradeoff between the amount of pre-processing and the amount of data to be transmitted. We model such a system by considering two non-preemptive queues in tandem whose service times are independent o&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.00928v1-abstract-full').style.display = 'inline'; document.getElementById('1907.00928v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.00928v1-abstract-full" style="display: none;"> This paper is motivated by emerging edge computing applications in which generated data are pre-processed at the source and then transmitted to an edge server. In such a scenario, there is typically a tradeoff between the amount of pre-processing and the amount of data to be transmitted. We model such a system by considering two non-preemptive queues in tandem whose service times are independent over time but the transmission service time is dependent on the computation service time in mean value. The first queue is in M/GI/1/1 form with a single server, memoryless exponential arrivals, general independent service and no extra buffer to save incoming status update packets. The second queue is in GI/M/1/2* form with a single server receiving packets from the first queue, memoryless service and a single data buffer to save incoming packets. Additionally, mean service times of the first and second queues are dependent through a deterministic monotonic function. 
We perform stationary distribution analysis in this system and obtain closed form expressions for average age of information (AoI) and average peak AoI. Our numerical results illustrate the analytical findings and highlight the tradeoff between average AoI and average peak AoI generated by the tandem nature of the queueing system with dependent service times. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.00928v1-abstract-full').style.display = 'none'; document.getElementById('1907.00928v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1906.05266">arXiv:1906.05266</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1906.05266">pdf</a>, <a href="https://arxiv.org/ps/1906.05266">ps</a>, <a href="https://arxiv.org/format/1906.05266">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> The Tandem Duplication Distance is NP-hard </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lafond%2C+M">Manuel Lafond</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+B">Binhai Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="1906.05266v1-abstract-short" style="display: inline;"> In computational biology, tandem duplication is an important biological phenomenon which can occur either at the genome or at the DNA level. A tandem duplication takes a copy of a genome segment and inserts it right after the segment - this can be represented as the string operation $AXB \Rightarrow AXXB$. For example, Tandem exon duplications have been found in many species such as human, fly or&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1906.05266v1-abstract-full').style.display = 'inline'; document.getElementById('1906.05266v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1906.05266v1-abstract-full" style="display: none;"> In computational biology, tandem duplication is an important biological phenomenon which can occur either at the genome or at the DNA level. A tandem duplication takes a copy of a genome segment and inserts it right after the segment - this can be represented as the string operation $AXB \Rightarrow AXXB$. For example, Tandem exon duplications have been found in many species such as human, fly or worm, and have been largely studied in computational biology. The Tandem Duplication (TD) distance problem we investigate in this paper is defined as follows: given two strings $S$ and $T$ over the same alphabet, compute the smallest sequence of tandem duplications required to convert $S$ to $T$. The natural question of whether the TD distance can be computed in polynomial time was posed in 2004 by Leupold et al. and had remained open, despite the fact that tandem duplications have received much attention ever since. In this paper, we prove that this problem is NP-hard. We further show that this hardness holds even if all characters of $S$ are distinct. This is known as the exemplar TD distance, which is of special relevance in bioinformatics. 
One of the tools we develop for the reduction is a new problem called the Cost-Effective Subgraph, for which we obtain W[1]-hardness results that might be of independent interest. We finally show that computing the exemplar TD distance between $S$ and $T$ is fixed-parameter tractable. Our results open the door to many other questions, and we conclude with several open problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1906.05266v1-abstract-full').style.display = 'none'; document.getElementById('1906.05266v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 June, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.01735">arXiv:1904.01735</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1904.01735">pdf</a>, <a href="https://arxiv.org/format/1904.01735">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Multi-Modal Generative Adversarial Network for Short Product Title Generation in Mobile E-Commerce </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jian-Guo Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pengcheng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wan%2C+Y">Yao Wan</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+X">Xiuming Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+Y">Yu Gong</a>, <a 
href="/search/cs?searchtype=author&amp;query=Yu%2C+P+S">Philip S. Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.01735v1-abstract-short" style="display: inline;"> Nowadays, more and more customers browse and purchase products in favor of using mobile E-Commerce Apps such as Taobao and Amazon. Since merchants are usually inclined to describe redundant and over-informative product titles to attract attentions from customers, it is important to concisely display short product titles on limited screen of mobile phones. To address this discrepancy, previous stud&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.01735v1-abstract-full').style.display = 'inline'; document.getElementById('1904.01735v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.01735v1-abstract-full" style="display: none;"> Nowadays, more and more customers browse and purchase products in favor of using mobile E-Commerce Apps such as Taobao and Amazon. Since merchants are usually inclined to describe redundant and over-informative product titles to attract attentions from customers, it is important to concisely display short product titles on limited screen of mobile phones. To address this discrepancy, previous studies mainly consider textual information of long product titles and lacks of human-like view during training and evaluation process. In this paper, we propose a Multi-Modal Generative Adversarial Network (MM-GAN) for short product title generation in E-Commerce, which innovatively incorporates image information and attribute tags from product, as well as textual information from original long titles. 
MM-GAN poses short title generation as a reinforcement learning process, where the generated titles are evaluated by the discriminator in a human-like view. Extensive experiments on a large-scale E-Commerce dataset demonstrate that our algorithm outperforms other state-of-the-art methods. Moreover, we deploy our model into a real-world online E-Commerce environment and effectively boost the performance of click through rate and click conversion rate by 1.66% and 1.87%, respectively. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.01735v1-abstract-full').style.display = 'none'; document.getElementById('1904.01735v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by NAACL-HLT 2019. 
arXiv admin note: substantial text overlap with arXiv:1811.04498</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.05428">arXiv:1901.05428</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1901.05428">pdf</a>, <a href="https://arxiv.org/ps/1901.05428">ps</a>, <a href="https://arxiv.org/format/1901.05428">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Relative Age of Information: A New Metric for Status Update Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Ozel%2C+O">Omur Ozel</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1901.05428v3-abstract-short" style="display: inline;"> In this paper, we introduce a new data freshness metric, relative Age of Information (rAoI), and examine it in a single server system with various packet management schemes. The (classical) AoI metric was introduced to measure the staleness of status updates at the receiving end with respect to their generation at the source. 
This metric addresses systems where the timings of update generation at&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.05428v3-abstract-full').style.display = 'inline'; document.getElementById('1901.05428v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1901.05428v3-abstract-full" style="display: none;"> In this paper, we introduce a new data freshness metric, relative Age of Information (rAoI), and examine it in a single server system with various packet management schemes. The (classical) AoI metric was introduced to measure the staleness of status updates at the receiving end with respect to their generation at the source. This metric addresses systems where the timings of update generation at the source are absolute and can be designed separately or jointly with the transmission schedules. In many decentralized applications, transmission schedules are blind to update generation timing, and the transmitter can know the timing of an update packet only after it arrives. As such, an update becomes stale after a new one arrives. The rAoI metric measures how fresh the data is at the receiver with respect to the data at the transmitter. It introduces a particularly explicit dependence on the arrival process in the evaluation of age. We investigate several queuing disciplines and provide closed form expressions for rAoI and numerical comparisons. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.05428v3-abstract-full').style.display = 'none'; document.getElementById('1901.05428v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.02873">arXiv:1901.02873</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1901.02873">pdf</a>, <a href="https://arxiv.org/ps/1901.02873">ps</a>, <a href="https://arxiv.org/format/1901.02873">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Waiting before Serving: A Companion to Packet Management in Status Update Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Ozel%2C+O">Omur Ozel</a>, <a href="/search/cs?searchtype=author&amp;query=Subramaniam%2C+S">Suresh Subramaniam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1901.02873v4-abstract-short" style="display: inline;"> In this paper, we explore the potential of server waiting before packet transmission in improving the Age of Information (AoI) in status update systems. 
We consider a non-preemptive queue with Poisson arrivals and independent general service distribution and we incorporate waiting before serving in two packet management schemes: M/GI/1/1 and M/GI/1/$2^*$. In M/GI/1/1 scheme, the server waits for a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.02873v4-abstract-full').style.display = 'inline'; document.getElementById('1901.02873v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1901.02873v4-abstract-full" style="display: none;"> In this paper, we explore the potential of server waiting before packet transmission in improving the Age of Information (AoI) in status update systems. We consider a non-preemptive queue with Poisson arrivals and independent general service distribution and we incorporate waiting before serving in two packet management schemes: M/GI/1/1 and M/GI/1/$2^*$. In M/GI/1/1 scheme, the server waits for a deterministic time immediately after a packet enters the server. In M/GI/1/$2^*$ scheme, depending on idle or busy system state, the server waits for a deterministic time before starting service of the packet. In both cases, if a potential newer arrival is captured existing packet is discarded. Different from most existing works, we analyze AoI evolution by indexing the incoming packets, which is enabled by an alternative method of partitioning the area under the evolution of instantaneous AoI to calculate its time average. We obtain expressions for average and average peak AoI for both queueing disciplines with waiting. Our numerical results demonstrate that waiting before service can bring significant improvement in average age, particularly, for heavy-tailed service distributions. This improvement comes at the expense of an increase in average peak AoI. We highlight the trade-off between average and average peak AoI generated by waiting before serving. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.02873v4-abstract-full').style.display = 'none'; document.getElementById('1901.02873v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.04498">arXiv:1811.04498</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1811.04498">pdf</a>, <a href="https://arxiv.org/format/1811.04498">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Product Title Refinement via Multi-Modal Generative Adversarial Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+J">Jianguo Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Pengcheng Zou</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Z">Zhao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wan%2C+Y">Yao Wan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Ye Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+X">Xiuming Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+Y">Yu Gong</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+P+S">Philip S. 
Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.04498v1-abstract-short" style="display: inline;"> Nowadays, an increasing number of customers are in favor of using E-commerce Apps to browse and purchase products. Since merchants are usually inclined to employ redundant and over-informative product titles to attract customers&#39; attention, it is of great importance to concisely display short product titles on limited screen of cell phones. Previous researchers mainly consider textual information&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.04498v1-abstract-full').style.display = 'inline'; document.getElementById('1811.04498v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.04498v1-abstract-full" style="display: none;"> Nowadays, an increasing number of customers are in favor of using E-commerce Apps to browse and purchase products. Since merchants are usually inclined to employ redundant and over-informative product titles to attract customers&#39; attention, it is of great importance to concisely display short product titles on limited screen of cell phones. Previous researchers mainly consider textual information of long product titles and lack of human-like view during training and evaluation procedure. In this paper, we propose a Multi-Modal Generative Adversarial Network (MM-GAN) for short product title generation, which innovatively incorporates image information, attribute tags from the product and the textual information from original long titles. MM-GAN treats short titles generation as a reinforcement learning process, where the generated titles are evaluated by the discriminator in a human-like view. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.04498v1-abstract-full').style.display = 'none'; document.getElementById('1811.04498v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Workshop on Visually Grounded Interaction and Language, NIPS, 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1708.09532">arXiv:1708.09532</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1708.09532">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.physa.2018.08.053">10.1016/j.physa.2018.08.053 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Leveraging local h-index to identify and rank influential spreaders in networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Q">Qiang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+Y">Yuxiao Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+Y">Yan Jia</a>, <a 
href="/search/cs?searchtype=author&amp;query=Deng%2C+L">Lu Deng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+B">Bin Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+J">Junxing Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zou%2C+P">Peng Zou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1708.09532v2-abstract-short" style="display: inline;"> Identifying influential nodes in complex networks has received increasing attention for its great theoretical and practical applications in many fields. Traditional methods, such as degree centrality, betweenness centrality, closeness centrality, and coreness centrality, have more or less disadvantages in detecting influential nodes, which have been illustrated in related literatures. Recently, th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1708.09532v2-abstract-full').style.display = 'inline'; document.getElementById('1708.09532v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1708.09532v2-abstract-full" style="display: none;"> Identifying influential nodes in complex networks has received increasing attention for its great theoretical and practical applications in many fields. Traditional methods, such as degree centrality, betweenness centrality, closeness centrality, and coreness centrality, have more or less disadvantages in detecting influential nodes, which have been illustrated in related literatures. Recently, the h-index, which is utilized to measure both the productivity and citation impact of the publications of a scientist or scholar, has been introduced to the network world to evaluate a node&#39;s spreading ability. 
However, this method assigns too many nodes with the same value, which leads to a resolution limit problem in distinguishing the real influence of these nodes. In this paper, we propose a local h-index centrality (LH-index) method for identifying and ranking influential nodes in networks. The LH-index method simultaneously takes into account the h-index values of the node itself and its neighbors, which is based on the idea that a node connecting to more influential nodes will also be influential. According to the simulation results with the stochastic Susceptible-Infected-Recovered (SIR) model in four real world networks and several simulated networks, we demonstrate the effectiveness of the LH-index method in identifying influential nodes in networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1708.09532v2-abstract-full').style.display = 'none'; document.getElementById('1708.09532v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 August, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 6 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Q. 
Liu, Physica A (2018) 379-391 </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div 
class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 
47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10