Search | arXiv e-print repository
Showing 1–50 of 93 results for author: Chou, C
Searching in archive cs, sorted by announcement date (newest first), 50 results per page.

</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Chou%2C+C&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Chou%2C+C&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Chou%2C+C&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13079">arXiv:2409.13079</a> <span> [<a href="https://arxiv.org/pdf/2409.13079">pdf</a>, <a href="https://arxiv.org/format/2409.13079">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Embedding Geometries of Contrastive Language-Image Pre-Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+J+C">Jason Chuan-Chih Chou</a>, <a href="/search/cs?searchtype=author&query=Alam%2C+N">Nahid Alam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13079v1-abstract-short" style="display: inline;"> Since the publication of CLIP, the approach of using InfoNCE loss for contrastive pre-training has become widely popular for bridging two or more modalities. Despite its wide adoption, CLIP's original design choices of L2 normalization and cosine similarity logit have rarely been revisited. We have systematically experimented with alternative geometries and softmax logits for language-image pre-tr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13079v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13079v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13079v1-abstract-full" style="display: none;"> Since the publication of CLIP, the approach of using InfoNCE loss for contrastive pre-training has become widely popular for bridging two or more modalities. Despite its wide adoption, CLIP's original design choices of L2 normalization and cosine similarity logit have rarely been revisited. 
We have systematically experimented with alternative geometries and softmax logits for language-image pre-training and identified that variants with intuitive Euclidean geometry, Euclidean CLIP (EuCLIP), match or exceed the performance of CLIP and support hierarchical relationships at least as well as more complicated hyperbolic alternative. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13079v1-abstract-full').style.display = 'none'; document.getElementById('2409.13079v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ECCV 2024 - Beyond Euclidean Workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.12386">arXiv:2409.12386</a> <span> [<a href="https://arxiv.org/pdf/2409.12386">pdf</a>, <a href="https://arxiv.org/format/2409.12386">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Channel-Aware Domain-Adaptive Generative Adversarial Network for Robust Speech Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wang%2C+C">Chien-Chun Wang</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+L">Li-Wei Chen</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Cheng-Kang Chou</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+H">Hung-Shin Lee</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+B">Berlin Chen</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+H">Hsin-Min Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.12386v1-abstract-short" style="display: inline;"> While pre-trained automatic speech recognition (ASR) systems demonstrate impressive performance on matched domains, their performance often degrades when confronted with channel mismatch stemming from unseen recording environments and conditions. To mitigate this issue, we propose a novel channel-aware data simulation method for robust ASR training. 
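The design choice discussed above (cosine-similarity logits over L2-normalized embeddings versus a Euclidean-distance logit) can be made concrete with a small NumPy sketch. This is an illustrative reconstruction rather than the paper's code: the toy embeddings, the temperature value, and the exact form of the Euclidean logit are assumptions.

```python
import numpy as np

def log_softmax(x, axis):
    x = x - x.max(axis=axis, keepdims=True)            # numerical stability
    return x - np.log(np.exp(x).sum(axis=axis, keepdims=True))

def infonce(logits):
    """Symmetric InfoNCE loss; matched image/text pairs sit on the diagonal."""
    n = logits.shape[0]
    diag = np.arange(n)
    loss_i2t = -log_softmax(logits, axis=1)[diag, diag].mean()
    loss_t2i = -log_softmax(logits, axis=0)[diag, diag].mean()
    return 0.5 * (loss_i2t + loss_t2i)

rng = np.random.default_rng(0)
img, txt = rng.normal(size=(8, 64)), rng.normal(size=(8, 64))   # toy embeddings
tau = 0.07                                                      # assumed temperature

# CLIP-style: L2-normalize both sides, then use cosine similarity as the logit.
img_n = img / np.linalg.norm(img, axis=1, keepdims=True)
txt_n = txt / np.linalg.norm(txt, axis=1, keepdims=True)
cosine_logits = img_n @ txt_n.T / tau

# Euclidean variant (roughly in the spirit of EuCLIP): negative Euclidean distance as the logit.
dists = np.linalg.norm(img[:, None, :] - txt[None, :, :], axis=-1)
euclidean_logits = -dists / tau

print(infonce(cosine_logits), infonce(euclidean_logits))
```

With real image and text encoders the InfoNCE loss is unchanged; only the construction of the logit matrix differs.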
2. arXiv:2409.12386 [pdf, other] (https://arxiv.org/abs/2409.12386)
   Categories: cs.SD, cs.AI, cs.CL, eess.AS
   Title: Channel-Aware Domain-Adaptive Generative Adversarial Network for Robust Speech Recognition
   Authors: Chien-Chun Wang, Li-Wei Chen, Cheng-Kang Chou, Hung-Shin Lee, Berlin Chen, Hsin-Min Wang
   Abstract: While pre-trained automatic speech recognition (ASR) systems demonstrate impressive performance on matched domains, their performance often degrades when confronted with channel mismatch stemming from unseen recording environments and conditions. To mitigate this issue, we propose a novel channel-aware data simulation method for robust ASR training. Our method harnesses the synergistic power of channel-extractive techniques and generative adversarial networks (GANs). We first train a channel encoder capable of extracting embeddings from arbitrary audio. On top of this, channel embeddings are extracted using a minimal amount of target-domain data and used to guide a GAN-based speech synthesizer. This synthesizer generates speech that faithfully preserves the phonetic content of the input while mimicking the channel characteristics of the target domain. We evaluate our method on the challenging Hakka Across Taiwan (HAT) and Taiwanese Across Taiwan (TAT) corpora, achieving relative character error rate (CER) reductions of 20.02% and 9.64%, respectively, compared to the baselines. These results highlight the efficacy of our channel-aware data simulation method for bridging the gap between source- and target-domain acoustics.
   Submitted: 18 September, 2024; originally announced September 2024.
   Comments: Submitted to ICASSP 2025

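For reference, the relative character error rate (CER) reductions quoted above follow the usual definition of relative reduction; the CER values in this sketch are made-up placeholders, used only to illustrate the arithmetic.

```python
def relative_reduction(baseline, improved):
    """Relative reduction = (baseline - improved) / baseline."""
    return (baseline - improved) / baseline

# Hypothetical CERs chosen only to show the formula; they are not the paper's numbers.
print(f"{relative_reduction(0.25, 0.20):.2%}")   # 20.00% relative CER reduction
print(f"{relative_reduction(0.25, 0.226):.2%}")  # ~9.6% relative CER reduction
```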
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to ICASSP 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.06355">arXiv:2409.06355</a> <span> [<a href="https://arxiv.org/pdf/2409.06355">pdf</a>, <a href="https://arxiv.org/format/2409.06355">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> DiffQRCoder: Diffusion-based Aesthetic QR Code Generation with Scanning Robustness Guided Iterative Refinement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Liao%2C+J">Jia-Wei Liao</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+W">Winston Wang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Tzu-Sian Wang</a>, <a href="/search/cs?searchtype=author&query=Peng%2C+L">Li-Xuan Peng</a>, <a href="/search/cs?searchtype=author&query=Weng%2C+J">Ju-Hsuan Weng</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Cheng-Fu Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+J">Jun-Cheng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.06355v1-abstract-short" style="display: inline;"> With the success of Diffusion Models for image generation, the technologies also have revolutionized the aesthetic Quick Response (QR) code generation. Despite significant improvements in visual attractiveness for the beautified codes, their scannabilities are usually sacrificed and thus hinder their practical uses in real-world scenarios. To address this issue, we propose a novel Diffusion-based… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06355v1-abstract-full').style.display = 'inline'; document.getElementById('2409.06355v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.06355v1-abstract-full" style="display: none;"> With the success of Diffusion Models for image generation, the technologies also have revolutionized the aesthetic Quick Response (QR) code generation. Despite significant improvements in visual attractiveness for the beautified codes, their scannabilities are usually sacrificed and thus hinder their practical uses in real-world scenarios. To address this issue, we propose a novel Diffusion-based QR Code generator (DiffQRCoder) to effectively craft both scannable and visually pleasing QR codes. The proposed approach introduces Scanning-Robust Perceptual Guidance (SRPG), a new diffusion guidance for Diffusion Models to guarantee the generated aesthetic codes to obey the ground-truth QR codes while maintaining their attractiveness during the denoising process. Additionally, we present another post-processing technique, Scanning Robust Manifold Projected Gradient Descent (SR-MPGD), to further enhance their scanning robustness through iterative latent space optimization. With extensive experiments, the results demonstrate that our approach not only outperforms other compared methods in Scanning Success Rate (SSR) with better or comparable CLIP aesthetic score (CLIP-aes.) 
but also significantly improves the SSR of the ControlNet-only approach from 60% to 99%. The subjective evaluation indicates that our approach achieves promising visual attractiveness to users as well. Finally, even with different scanning angles and the most rigorous error tolerance settings, our approach robustly achieves over 95% SSR, demonstrating its capability for real-world applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06355v1-abstract-full').style.display = 'none'; document.getElementById('2409.06355v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.11810">arXiv:2408.11810</a> <span> [<a href="https://arxiv.org/pdf/2408.11810">pdf</a>, <a href="https://arxiv.org/format/2408.11810">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Pixel Is Not A Barrier: An Effective Evasion Attack for Pixel-Domain Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shih%2C+C">Chun-Yen Shih</a>, <a href="/search/cs?searchtype=author&query=Peng%2C+L">Li-Xuan Peng</a>, <a href="/search/cs?searchtype=author&query=Liao%2C+J">Jia-Wei Liao</a>, <a href="/search/cs?searchtype=author&query=Chu%2C+E">Ernie Chu</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Cheng-Fu Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+J">Jun-Cheng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.11810v1-abstract-short" style="display: inline;"> Diffusion Models have emerged as powerful generative models for high-quality image synthesis, with many subsequent image editing techniques based on them. However, the ease of text-based image editing introduces significant risks, such as malicious editing for scams or intellectual property infringement. Previous works have attempted to safeguard images from diffusion-based editing by adding imper… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.11810v1-abstract-full').style.display = 'inline'; document.getElementById('2408.11810v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.11810v1-abstract-full" style="display: none;"> Diffusion Models have emerged as powerful generative models for high-quality image synthesis, with many subsequent image editing techniques based on them. However, the ease of text-based image editing introduces significant risks, such as malicious editing for scams or intellectual property infringement. Previous works have attempted to safeguard images from diffusion-based editing by adding imperceptible perturbations. These methods are costly and specifically target prevalent Latent Diffusion Models (LDMs), while Pixel-domain Diffusion Models (PDMs) remain largely unexplored and robust against such attacks. 
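The post-processing step described above (iteratively refining the generated image so that a scanning-robustness objective improves) can be sketched generically. Everything here is a schematic stand-in: scanning_robust_loss is a hypothetical placeholder for the paper's Scanning-Robust Loss, plain pixel-space gradient descent stands in for the latent-space optimization actually used, and the sizes and step size are arbitrary.

```python
import numpy as np

def scanning_robust_loss(img, target_bits, module=8):
    """Hypothetical stand-in for SRL: penalize modules whose mean luminance
    disagrees with the target QR bit (0 = dark, 1 = light)."""
    h, w = target_bits.shape
    mods = img.reshape(h, module, w, module).mean(axis=(1, 3))
    return ((mods - target_bits) ** 2).mean()

def srl_grad(img, target_bits, module=8):
    """Analytic gradient of the stand-in loss with respect to the image pixels."""
    h, w = target_bits.shape
    mods = img.reshape(h, module, w, module).mean(axis=(1, 3))
    g = 2.0 * (mods - target_bits) / (h * w)
    return np.repeat(np.repeat(g, module, axis=0), module, axis=1) / module**2

rng = np.random.default_rng(0)
target = rng.integers(0, 2, size=(25, 25)).astype(float)   # toy "QR" bit matrix
img = rng.uniform(size=(200, 200))                         # toy aesthetic image

for _ in range(200):                                       # iterative refinement
    img = np.clip(img - 5000.0 * srl_grad(img, target), 0.0, 1.0)

print(scanning_robust_loss(img, target))                   # loss decreases toward zero
```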
4. arXiv:2408.11810 [pdf, other] (https://arxiv.org/abs/2408.11810)
   Categories: cs.CV
   Title: Pixel Is Not A Barrier: An Effective Evasion Attack for Pixel-Domain Diffusion Models
   Authors: Chun-Yen Shih, Li-Xuan Peng, Jia-Wei Liao, Ernie Chu, Cheng-Fu Chou, Jun-Cheng Chen
   Abstract: Diffusion Models have emerged as powerful generative models for high-quality image synthesis, with many subsequent image editing techniques based on them. However, the ease of text-based image editing introduces significant risks, such as malicious editing for scams or intellectual property infringement. Previous works have attempted to safeguard images from diffusion-based editing by adding imperceptible perturbations. These methods are costly and specifically target prevalent Latent Diffusion Models (LDMs), while Pixel-domain Diffusion Models (PDMs) remain largely unexplored and robust against such attacks. Our work addresses this gap by proposing a novel attacking framework with a feature representation attack loss that exploits vulnerabilities in denoising UNets and a latent optimization strategy to enhance the naturalness of protected images. Extensive experiments demonstrate the effectiveness of our approach in attacking dominant PDM-based editing methods (e.g., SDEdit) while maintaining reasonable protection fidelity and robustness against common defense methods. Additionally, our framework is extensible to LDMs, achieving comparable performance to existing approaches.
   Submitted: 21 August, 2024; originally announced August 2024.

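The "feature representation attack loss" mentioned above can be illustrated with a generic PGD-style sketch that pushes the features of the protected image away from their original values under an L-infinity budget. The linear feature map, the budget, and the step schedule below are hypothetical stand-ins, not the paper's UNet features or settings.

```python
import numpy as np

rng = np.random.default_rng(0)
D = 3 * 32 * 32
W = rng.normal(size=(64, D)) / np.sqrt(D)     # hypothetical linear "feature extractor"

def features(x):
    return W @ x.reshape(-1)

def attack_loss(x, x_ref):
    """Feature-representation attack loss: distance between perturbed and original features."""
    d = features(x) - features(x_ref)
    return 0.5 * float(d @ d)

x = rng.uniform(size=(3, 32, 32))             # image to protect (toy data)
eps, alpha, steps = 8 / 255, 2 / 255, 20      # assumed L-infinity budget and step size
delta = rng.uniform(-eps, eps, size=x.shape)  # random start, as in standard PGD

for _ in range(steps):
    grad = (W.T @ (W @ delta.reshape(-1))).reshape(x.shape)    # analytic gradient w.r.t. delta
    delta = np.clip(delta + alpha * np.sign(grad), -eps, eps)  # ascend, project to the L-inf ball
    delta = np.clip(x + delta, 0.0, 1.0) - x                   # keep the perturbed image in [0, 1]

print(attack_loss(x + delta, x))              # grows as the perturbation is refined
```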
5. arXiv:2407.08665 [pdf] (https://arxiv.org/abs/2407.08665)
   Categories: cond-mat.mtrl-sci, cs.ET
   Title: Closed Loop Superparamagnetic Tunnel Junctions for Reliable True Randomness and Generative Artificial Intelligence
   Authors: Dooyong Koh, Qiuyuan Wang, Brooke C. McGoldrick, Chung-Tao Chou, Luqiao Liu, Marc A. Baldo
   Abstract: Physical devices exhibiting stochastic functions with low energy consumption and high device density have the potential to enable complex probability-based computing algorithms, accelerate machine learning tasks, and enhance hardware security. Recently, superparamagnetic tunnel junctions (sMTJs) have been widely explored for such purposes, leading to the development of sMTJ-based systems; however, the reliance on nanoscale ferromagnets limits scalability and reliability, making sMTJs sensitive to external perturbations and prone to significant device variations. Here, we present an experimental demonstration of closed-loop three-terminal sMTJs as reliable and potentially scalable sources of true randomness in the field-free regime. By leveraging dual-current controllability and incorporating feedback, we stabilize the switching operation of superparamagnets and reach cryptographic-quality random bitstreams. The realization of controllable and robust true random sMTJs underpins a general hardware platform for computing schemes exploiting the stochasticity in the physical world, as demonstrated by the generative artificial intelligence example in our experiment.
   Submitted: 14 September, 2024; v1 submitted 11 July, 2024; originally announced July 2024.

6. arXiv:2406.10272 [pdf, other] (https://arxiv.org/abs/2406.10272)
   Categories: cs.CL, cs.LG, cs.SD, eess.AS
   Title: Connected Speech-Based Cognitive Assessment in Chinese and English
   Authors: Saturnino Luz, Sofia De La Fuente Garcia, Fasih Haider, Davida Fromm, Brian MacWhinney, Alyssa Lanzi, Ya-Ning Chang, Chia-Ju Chou, Yi-Chien Liu
   Abstract: We present a novel benchmark dataset and prediction tasks for investigating approaches to assess cognitive function through analysis of connected speech. The dataset consists of speech samples and clinical information for speakers of Mandarin Chinese and English with different levels of cognitive impairment, as well as individuals with normal cognition. These data have been carefully matched by age and sex via propensity score analysis to ensure balance and representativity in model training. The prediction tasks encompass mild cognitive impairment diagnosis and cognitive test score prediction. This framework was designed to encourage the development of approaches to speech-based cognitive assessment which generalise across languages. We illustrate it by presenting baseline prediction models that employ language-agnostic and comparable features for diagnosis and cognitive test score prediction. The models achieved an unweighted average recall of 59.2% in diagnosis and a root mean squared error of 2.89 in score prediction.
   Submitted: 18 June, 2024; v1 submitted 11 June, 2024; originally announced June 2024.
   Comments: To appear in Proceedings of Interspeech 2024
   ACM Class: J.3; I.5.4

7. arXiv:2405.12026 [pdf, other] (https://arxiv.org/abs/2405.12026)
   Categories: cs.IT
   Title: Enzymatic cycle-based receivers with high input impedance for approximate maximum a posteriori demodulation of concentration modulated signals
   Authors: Chun Tung Chou
   Abstract: Molecular communication is a bio-inspired communication paradigm where molecules are used as the information carrier. This paper considers a molecular communication network in which the transmitter uses concentration-modulated signals for communication. Our focus is to design receivers that can demodulate these signals. We impose three features on our receivers: they should use enzymatic cycles as their building blocks, have high input impedance, and work approximately as maximum a posteriori (MAP) demodulators. No receivers with all three of these features exist in the current molecular communication literature. We consider enzymatic cycles because they are a very common class of chemical reactions found in living cells. Since a receiver is placed in the communication environment, it should ideally have a high input impedance so that it has minimal impact on the environment and on other receivers. Lastly, a MAP receiver has good statistical performance. In this paper, we show how time-scale separation can give an enzymatic cycle high input impedance, and how the parameters of the enzymatic cycle can be chosen so that the receiver approximately implements a MAP demodulator. We use simulation to study the performance of this receiver. In particular, we consider an environment with multiple receivers and show that a receiver has little impact on the bit error ratio of a nearby receiver because both have high input impedance.
   Submitted: 5 June, 2024; v1 submitted 20 May, 2024; originally announced May 2024.

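As background for the demodulation goal above, here is a minimal sketch of a MAP decision rule for a binary concentration-modulated signal, assuming a simple Poisson model for the number of molecules observed per symbol. The receiver in the paper implements such a rule chemically with enzymatic cycles; the observation model and the numbers here are illustrative assumptions.

```python
from math import lgamma, log

def log_poisson_pmf(k, lam):
    return k * log(lam) - lam - lgamma(k + 1)

def map_demodulate(count, lam0, lam1, prior1=0.5):
    """Return the MAP estimate of the transmitted bit given an observed molecule count."""
    post0 = log_poisson_pmf(count, lam0) + log(1 - prior1)
    post1 = log_poisson_pmf(count, lam1) + log(prior1)
    return int(post1 > post0)

# Illustrative means: bit 0 -> low concentration (about 5 molecules observed on average),
# bit 1 -> high concentration (about 20 molecules observed on average).
lam0, lam1 = 5.0, 20.0
for count in (3, 10, 11, 25):
    print(count, map_demodulate(count, lam0, lam1))   # decision flips between counts 10 and 11
```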
8. arXiv:2405.06851 [pdf, other] (https://arxiv.org/abs/2405.06851)
   Categories: q-bio.NC, cond-mat.dis-nn, cond-mat.stat-mech, cs.NE, stat.ML
   Title: Nonlinear classification of neural manifolds with contextual information
   Authors: Francesca Mignacco, Chi-Ning Chou, SueYeon Chung
   Abstract: Understanding how neural systems efficiently process information through distributed representations is a fundamental challenge at the interface of neuroscience and machine learning. Recent approaches analyze the statistical and geometrical attributes of neural representations as population-level mechanistic descriptors of task implementation. In particular, manifold capacity has emerged as a promising framework linking population geometry to the separability of neural manifolds. However, this metric has been limited to linear readouts. Here, we propose a theoretical framework that overcomes this limitation by leveraging contextual input information. We derive an exact formula for the context-dependent capacity that depends on manifold geometry and context correlations, and validate it on synthetic and real data. Our framework's increased expressivity captures representation untanglement in deep networks at early stages of the layer hierarchy, previously inaccessible to analysis. As context-dependent nonlinearity is ubiquitous in neural systems, our data-driven and theoretically grounded approach promises to elucidate context-dependent computation across scales, datasets, and models.
   Submitted: 10 May, 2024; originally announced May 2024.
   Comments: 5 pages, 5 figures

9. arXiv:2405.06801 [pdf, other] (https://arxiv.org/abs/2405.06801)
   Categories: cs.NI
   DOI: 10.1109/MNET.2024.3391271 (https://doi.org/10.1109/MNET.2024.3391271)
   Title: LEO Satellite Network Access in the Wild: Potentials, Experiences, and Challenges
   Authors: Sami Ma, Yi Ching Chou, Miao Zhang, Hao Fang, Haoyuan Zhao, Jiangchuan Liu, William I. Atlas
   Abstract: In the past three years, working with the Pacific Salmon Foundation and various First Nations groups, we have established Starlink-empowered wild salmon monitoring sites in remote Northern British Columbia, Canada. We report our experiences with the network services in these challenging environments, including deep woods and deep valleys that lack infrastructural support, some close to Starlink's service boundary in the far north. We assess the portability and mobility of the satellite dishes and the quality of existing network access in underdeveloped countries that Starlink expects to cover. Our experiences suggest that network access based on LEO satellite constellations holds promise but faces hurdles such as energy supply constraints and environmental factors like temperature, precipitation, and solar storms. The presence of wildlife and respect for local residents' culture and heritage pose further complications. We envision several technical solutions addressing these challenges and believe that further regulations will be necessary.
   Submitted: 10 May, 2024; originally announced May 2024.
   Comments: 8 pages, 6 figures
   ACM Class: C.2.1

<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15252v1-abstract-full').style.display = 'none'; document.getElementById('2404.15252v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted by the UG2+ workshop at CVPR 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.16451">arXiv:2403.16451</a> <span> [<a href="https://arxiv.org/pdf/2403.16451">pdf</a>, <a href="https://arxiv.org/format/2403.16451">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> DeepMachining: Online Prediction of Machining Errors of Lathe Machines </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lu%2C+X">Xiang-Li Lu</a>, <a href="/search/cs?searchtype=author&query=Hsu%2C+H">Hwai-Jung Hsu</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Che-Wei Chou</a>, <a href="/search/cs?searchtype=author&query=Kung%2C+H+T">H. T. Kung</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+C">Chen-Hsin Lee</a>, <a href="/search/cs?searchtype=author&query=Cheng%2C+S">Sheng-Mao Cheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.16451v4-abstract-short" style="display: inline;"> We describe DeepMachining, a deep learning-based AI system for online prediction of machining errors of lathe machine operations. We have built and evaluated DeepMachining based on manufacturing data from factories. Specifically, we first pretrain a deep learning model for a given lathe machine's operations to learn the salient features of machining states. Then, we fine-tune the pretrained model… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.16451v4-abstract-full').style.display = 'inline'; document.getElementById('2403.16451v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.16451v4-abstract-full" style="display: none;"> We describe DeepMachining, a deep learning-based AI system for online prediction of machining errors of lathe machine operations. We have built and evaluated DeepMachining based on manufacturing data from factories. Specifically, we first pretrain a deep learning model for a given lathe machine's operations to learn the salient features of machining states. Then, we fine-tune the pretrained model to adapt to specific machining tasks. We demonstrate that DeepMachining achieves high prediction accuracy for multiple tasks that involve different workpieces and cutting tools. To the best of our knowledge, this work is one of the first factory experiments using pre-trained deep-learning models to predict machining errors of lathe machines. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.16451v4-abstract-full').style.display = 'none'; document.getElementById('2403.16451v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.15878">arXiv:2403.15878</a> <span> [<a href="https://arxiv.org/pdf/2403.15878">pdf</a>, <a href="https://arxiv.org/format/2403.15878">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Diffusion-based Aesthetic QR Code Generation via Scanning-Robust Perceptual Guidance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Liao%2C+J">Jia-Wei Liao</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+W">Winston Wang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Tzu-Sian Wang</a>, <a href="/search/cs?searchtype=author&query=Peng%2C+L">Li-Xuan Peng</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Cheng-Fu Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+J">Jun-Cheng Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.15878v1-abstract-short" style="display: inline;"> QR codes, prevalent in daily applications, lack visual appeal due to their conventional black-and-white design. Integrating aesthetics while maintaining scannability poses a challenge. In this paper, we introduce a novel diffusion-model-based aesthetic QR code generation pipeline, utilizing pre-trained ControlNet and guided iterative refinement via a novel classifier guidance (SRG) based on the pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.15878v1-abstract-full').style.display = 'inline'; document.getElementById('2403.15878v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.15878v1-abstract-full" style="display: none;"> QR codes, prevalent in daily applications, lack visual appeal due to their conventional black-and-white design. Integrating aesthetics while maintaining scannability poses a challenge. In this paper, we introduce a novel diffusion-model-based aesthetic QR code generation pipeline, utilizing pre-trained ControlNet and guided iterative refinement via a novel classifier guidance (SRG) based on the proposed Scanning-Robust Loss (SRL) tailored with QR code mechanisms, which ensures both aesthetics and scannability. To further improve the scannability while preserving aesthetics, we propose a two-stage pipeline with Scanning-Robust Perceptual Guidance (SRPG). Moreover, we can further enhance the scannability of the generated QR code by post-processing it through the proposed Scanning-Robust Projected Gradient Descent (SRPGD) post-processing technique based on SRL with proven convergence. 
With extensive quantitative, qualitative, and subjective experiments, the results demonstrate that the proposed approach can generate diverse aesthetic QR codes with flexibility in detail. In addition, our pipelines outperforming existing models in terms of Scanning Success Rate (SSR) 86.67% (+40%) with comparable aesthetic scores. The pipeline combined with SRPGD further achieves 96.67% (+50%). Our code will be available https://github.com/jwliao1209/DiffQRCode. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.15878v1-abstract-full').style.display = 'none'; document.getElementById('2403.15878v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.07225">arXiv:2403.07225</a> <span> [<a href="https://arxiv.org/pdf/2403.07225">pdf</a>, <a href="https://arxiv.org/format/2403.07225">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Stereo-NEC: Enhancing Stereo Visual-Inertial SLAM Initialization with Normal Epipolar Constraints </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wang%2C+W">Weihan Wang</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chieh Chou</a>, <a href="/search/cs?searchtype=author&query=Sevagamoorthy%2C+G">Ganesh Sevagamoorthy</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+K">Kevin Chen</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+Z">Zheng Chen</a>, <a href="/search/cs?searchtype=author&query=Feng%2C+Z">Ziyue Feng</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+Y">Youjie Xia</a>, <a href="/search/cs?searchtype=author&query=Cai%2C+F">Feiyang Cai</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+Y">Yi Xu</a>, <a href="/search/cs?searchtype=author&query=Mordohai%2C+P">Philippos Mordohai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.07225v1-abstract-short" style="display: inline;"> We propose an accurate and robust initialization approach for stereo visual-inertial SLAM systems. Unlike the current state-of-the-art method, which heavily relies on the accuracy of a pure visual SLAM system to estimate inertial variables without updating camera poses, potentially compromising accuracy and robustness, our approach offers a different solution. We realize the crucial impact of prec… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07225v1-abstract-full').style.display = 'inline'; document.getElementById('2403.07225v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.07225v1-abstract-full" style="display: none;"> We propose an accurate and robust initialization approach for stereo visual-inertial SLAM systems. 
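To make the post-processing idea concrete, here is a toy gradient-descent-with-projection loop in the spirit of SRPGD. The penalty below is a placeholder for the paper's Scanning-Robust Loss, and the step size and iteration count are assumptions, not the published settings.

```python
# Toy sketch only: nudge a stylized image toward the target QR modules under a
# hypothetical scanning-robustness penalty, then clamp back to the valid pixel range.
import torch

def scanning_robust_penalty(img: torch.Tensor, qr_target: torch.Tensor) -> torch.Tensor:
    # Placeholder for SRL: penalize disagreement between the grayscale image and the
    # binary QR matrix, both assumed to live in [0, 1] with matching shapes.
    return ((img - qr_target) ** 2).mean()

def srpgd_like_postprocess(img, qr_target, steps=50, lr=0.05):
    x = img.clone().requires_grad_(True)
    for _ in range(steps):
        loss = scanning_robust_penalty(x, qr_target)
        loss.backward()
        with torch.no_grad():
            x -= lr * x.grad          # gradient step toward scannability
            x.clamp_(0.0, 1.0)        # projection onto the valid pixel range
        x.grad.zero_()
    return x.detach()
```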
arXiv:2403.07225 [pdf, other] (https://arxiv.org/abs/2403.07225)
Subjects: Robotics (cs.RO)
Stereo-NEC: Enhancing Stereo Visual-Inertial SLAM Initialization with Normal Epipolar Constraints
Authors: Weihan Wang, Chieh Chou, Ganesh Sevagamoorthy, Kevin Chen, Zheng Chen, Ziyue Feng, Youjie Xia, Feiyang Cai, Yi Xu, Philippos Mordohai
Abstract: We propose an accurate and robust initialization approach for stereo visual-inertial SLAM systems. Unlike the current state-of-the-art method, which heavily relies on the accuracy of a pure visual SLAM system to estimate inertial variables without updating camera poses, potentially compromising accuracy and robustness, our approach offers a different solution. We realize the crucial impact of precise gyroscope bias estimation on rotation accuracy. This, in turn, affects trajectory accuracy due to the accumulation of translation errors. To address this, we first independently estimate the gyroscope bias and use it to formulate a maximum a posteriori problem for further refinement. After this refinement, we proceed to update the rotation estimation by performing IMU integration with gyroscope bias removed from gyroscope measurements. We then leverage robust and accurate rotation estimates to enhance translation estimation via 3-DoF bundle adjustment. Moreover, we introduce a novel approach for determining the success of the initialization by evaluating the residual of the normal epipolar constraint. Extensive evaluations on the EuRoC dataset illustrate that our method excels in accuracy and robustness. It outperforms ORB-SLAM3, the current leading stereo visual-inertial initialization method, in terms of absolute trajectory error and relative rotation error, while maintaining competitive computational speed. Notably, even with 5 keyframes for initialization, our method consistently surpasses the state-of-the-art approach using 10 keyframes in rotation accuracy.
Submitted 11 March, 2024; originally announced March 2024.
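One ingredient the abstract highlights is rotation integration after removing the estimated gyroscope bias. A minimal NumPy sketch of that single step (not the authors' pipeline, and ignoring noise handling and preintegration details) could be:

```python
# Minimal sketch: remove an estimated gyroscope bias from raw angular rates and
# integrate the corrected rates into a rotation matrix with Rodrigues' formula.
import numpy as np

def skew(w):
    return np.array([[0.0, -w[2], w[1]],
                     [w[2], 0.0, -w[0]],
                     [-w[1], w[0], 0.0]])

def so3_exp(w):
    # Rotation matrix for a small rotation vector w (Rodrigues' formula).
    theta = np.linalg.norm(w)
    if theta < 1e-12:
        return np.eye(3)
    K = skew(w / theta)
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

def integrate_gyro(gyro, dt, bias):
    # gyro: (N, 3) raw angular rates; dt: sample period; bias: (3,) estimated bias.
    R = np.eye(3)
    for w in gyro:
        R = R @ so3_exp((w - bias) * dt)   # bias-corrected incremental rotation
    return R
```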
arXiv:2401.17244 [pdf, other] (https://arxiv.org/abs/2401.17244)
Subjects: Computation and Language (cs.CL); Materials Science (cond-mat.mtrl-sci); Artificial Intelligence (cs.AI)
LLaMP: Large Language Model Made Powerful for High-fidelity Materials Knowledge Retrieval and Distillation
Authors: Yuan Chiang, Elvis Hsieh, Chia-Hong Chou, Janosh Riebesell
Abstract: Reducing hallucination of Large Language Models (LLMs) is imperative for use in the sciences, where reliability and reproducibility are crucial. However, LLMs inherently lack long-term memory, making it a nontrivial, ad hoc, and inevitably biased task to fine-tune them on domain-specific literature and data. Here we introduce LLaMP, a multimodal retrieval-augmented generation (RAG) framework of hierarchical reasoning-and-acting (ReAct) agents that can dynamically and recursively interact with computational and experimental data on Materials Project (MP) and run atomistic simulations via a high-throughput workflow interface. Without fine-tuning, LLaMP demonstrates strong tool-usage ability to comprehend and integrate various modalities of materials science concepts, fetch relevant data stores on the fly, process higher-order data (such as crystal structure and elastic tensor), and streamline complex tasks in computational materials and chemistry. We propose a simple metric combining uncertainty and confidence estimates to evaluate the self-consistency of responses by LLaMP and vanilla LLMs. Our benchmark shows that LLaMP effectively mitigates the intrinsic bias in LLMs, counteracting the errors on bulk moduli, electronic bandgaps, and formation energies that seem to derive from mixed data sources. We also demonstrate LLaMP's capability to edit crystal structures and run annealing molecular dynamics simulations using pre-trained machine-learning force fields. The framework offers an intuitive and nearly hallucination-free approach to exploring and scaling materials informatics, and establishes a pathway for knowledge distillation and fine-tuning other language models. Code and live demo are available at https://github.com/chiang-yuan/llamp
Submitted 9 October, 2024; v1 submitted 30 January, 2024; originally announced January 2024.
Comments: 32 pages, 5 figures
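For readers unfamiliar with the ReAct-style retrieval loop the abstract refers to, the sketch below shows the generic pattern: the model alternates between reasoning, calling a tool, and observing the result. The `llm` callable and the tool names are hypothetical placeholders, not LLaMP's actual API.

```python
# Hedged sketch of a generic ReAct-style retrieval-augmented loop; `llm` is any
# prompt-to-text callable and `tools` maps hypothetical tool names to callables.
def react_rag_answer(question, llm, tools, max_steps=5):
    """llm(prompt) -> str; tools: dict[str, callable(str) -> str]."""
    transcript = f"Question: {question}\n"
    for _ in range(max_steps):
        step = llm(transcript + "Thought, then 'Action: tool: input' or 'Final Answer: ...':")
        transcript += step + "\n"
        if "Final Answer:" in step:
            return step.split("Final Answer:", 1)[1].strip()
        if "Action:" in step:
            # e.g. "Action: materials_search: bulk modulus of GaN" (hypothetical tool)
            call = step.split("Action:", 1)[1].strip()
            name, _, arg = call.partition(":")
            observation = tools.get(name.strip(), lambda s: "unknown tool")(arg.strip())
            transcript += f"Observation: {observation}\n"   # ground the next step in data
    return transcript  # fell through without a final answer
```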
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">32 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.16945">arXiv:2401.16945</a> <span> [<a href="https://arxiv.org/pdf/2401.16945">pdf</a>, <a href="https://arxiv.org/format/2401.16945">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> Online Resource Allocation with Non-Stationary Customers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhang%2C+X">Xiaoyue Zhang</a>, <a href="/search/cs?searchtype=author&query=Qin%2C+H">Hanzhang Qin</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+M+C">Mabel C. Chou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.16945v2-abstract-short" style="display: inline;"> We propose a novel algorithm for online resource allocation with non-stationary customer arrivals and unknown click-through rates. We assume multiple types of customers arrive in a nonstationary stochastic fashion, with unknown arrival rates in each period, and that customers' click-through rates are unknown and can only be learned online. By leveraging results from the stochastic contextual bandi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.16945v2-abstract-full').style.display = 'inline'; document.getElementById('2401.16945v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.16945v2-abstract-full" style="display: none;"> We propose a novel algorithm for online resource allocation with non-stationary customer arrivals and unknown click-through rates. We assume multiple types of customers arrive in a nonstationary stochastic fashion, with unknown arrival rates in each period, and that customers' click-through rates are unknown and can only be learned online. By leveraging results from the stochastic contextual bandit with knapsack and online matching with adversarial arrivals, we develop an online scheme to allocate the resources to nonstationary customers. We prove that under mild conditions, our scheme achieves a ``best-of-both-world'' result: the scheme has a sublinear regret when the customer arrivals are near-stationary, and enjoys an optimal competitive ratio under general (non-stationary) customer arrival distributions. Finally, we conduct extensive numerical experiments to show our approach generates near-optimal revenues for all different customer scenarios. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.16945v2-abstract-full').style.display = 'none'; document.getElementById('2401.16945v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.02905">arXiv:2401.02905</a> <span> [<a href="https://arxiv.org/pdf/2401.02905">pdf</a>, <a href="https://arxiv.org/format/2401.02905">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> H2G2-Net: A Hierarchical Heterogeneous Graph Generative Network Framework for Discovery of Multi-Modal Physiological Responses </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gu%2C+H">Haidong Gu</a>, <a href="/search/cs?searchtype=author&query=Gaw%2C+N">Nathan Gaw</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yinan Wang</a>, <a href="/search/cs?searchtype=author&query=Johnstone%2C+C">Chancellor Johnstone</a>, <a href="/search/cs?searchtype=author&query=Beauchene%2C+C">Christine Beauchene</a>, <a href="/search/cs?searchtype=author&query=Yuditskaya%2C+S">Sophia Yuditskaya</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+H">Hrishikesh Rao</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chun-An Chou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.02905v2-abstract-short" style="display: inline;"> Discovering human cognitive and emotional states using multi-modal physiological signals draws attention across various research applications. Physiological responses of the human body are influenced by human cognition and commonly used to analyze cognitive states. From a network science perspective, the interactions of these heterogeneous physiological modalities in a graph structure may provide… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.02905v2-abstract-full').style.display = 'inline'; document.getElementById('2401.02905v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.02905v2-abstract-full" style="display: none;"> Discovering human cognitive and emotional states using multi-modal physiological signals draws attention across various research applications. Physiological responses of the human body are influenced by human cognition and commonly used to analyze cognitive states. From a network science perspective, the interactions of these heterogeneous physiological modalities in a graph structure may provide insightful information to support prediction of cognitive states. 
However, there is no clue to derive exact connectivity between heterogeneous modalities and there exists a hierarchical structure of sub-modalities. Existing graph neural networks are designed to learn on non-hierarchical homogeneous graphs with pre-defined graph structures; they failed to learn from hierarchical, multi-modal physiological data without a pre-defined graph structure. To this end, we propose a hierarchical heterogeneous graph generative network (H2G2-Net) that automatically learns a graph structure without domain knowledge, as well as a powerful representation on the hierarchical heterogeneous graph in an end-to-end fashion. We validate the proposed method on the CogPilot dataset that consists of multi-modal physiological signals. Extensive experiments demonstrate that our proposed method outperforms the state-of-the-art GNNs by 5%-20% in prediction accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.02905v2-abstract-full').style.display = 'none'; document.getElementById('2401.02905v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Paper accepted in Human-Centric Representation Learning workshop at AAAI 2024 (https://hcrl-workshop.github.io/2024/)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.14285">arXiv:2312.14285</a> <span> [<a href="https://arxiv.org/pdf/2312.14285">pdf</a>, <a href="https://arxiv.org/format/2312.14285">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Probing Biological and Artificial Neural Networks with Task-dependent Neural Manifolds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kuoch%2C+M">Michael Kuoch</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Parthasarathy%2C+N">Nikhil Parthasarathy</a>, <a href="/search/cs?searchtype=author&query=Dapello%2C+J">Joel Dapello</a>, <a href="/search/cs?searchtype=author&query=DiCarlo%2C+J+J">James J. 
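To give a feel for what "automatically learns a graph structure end-to-end" can mean in general (this is a generic sketch, not H2G2-Net's architecture), one can parameterize a soft adjacency matrix and train it jointly with the task:

```python
# Generic sketch: a learnable dense adjacency, squashed into [0, 1], drives one round
# of message passing, so the graph structure is trained end-to-end with the task head.
import torch
import torch.nn as nn

class LearnedGraphLayer(nn.Module):
    def __init__(self, n_nodes: int, d_in: int, d_out: int):
        super().__init__()
        self.adj_logits = nn.Parameter(torch.zeros(n_nodes, n_nodes))  # learned structure
        self.lin = nn.Linear(d_in, d_out)

    def forward(self, x):                         # x: (n_nodes, d_in) node features
        adj = torch.sigmoid(self.adj_logits)      # soft adjacency in [0, 1]
        msg = adj @ x                             # aggregate neighbor features
        return torch.relu(self.lin(msg))
```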
arXiv:2312.14285 [pdf, other] (https://arxiv.org/abs/2312.14285)
Subjects: Neurons and Cognition (q-bio.NC); Machine Learning (cs.LG); Neural and Evolutionary Computing (cs.NE)
Probing Biological and Artificial Neural Networks with Task-dependent Neural Manifolds
Authors: Michael Kuoch, Chi-Ning Chou, Nikhil Parthasarathy, Joel Dapello, James J. DiCarlo, Haim Sompolinsky, SueYeon Chung
Abstract: Recently, growth in our understanding of the computations performed in both biological and artificial neural networks has largely been driven by either low-level mechanistic studies or global normative approaches. However, concrete methodologies for bridging the gap between these levels of abstraction remain elusive. In this work, we investigate the internal mechanisms of neural networks through the lens of neural population geometry, aiming to provide understanding at an intermediate level of abstraction, as a way to bridge that gap. Utilizing manifold capacity theory (MCT) from statistical physics and manifold alignment analysis (MAA) from high-dimensional statistics, we probe the underlying organization of task-dependent manifolds in deep neural networks and macaque neural recordings. Specifically, we quantitatively characterize how different learning objectives lead to differences in the organizational strategies of these models and demonstrate how these geometric analyses are connected to the decodability of task-relevant information. These analyses present a strong direction for bridging mechanistic and normative theories in neural networks through neural population geometry, potentially opening up many future research avenues in both machine learning and neuroscience.
Submitted 21 December, 2023; originally announced December 2023.
Comments: To appear in the proceedings of the Conference on Parsimony and Learning (CPAL) 2024
arXiv:2311.05477 [pdf, other] (https://arxiv.org/abs/2311.05477)
Subjects: Image and Video Processing (eess.IV); Computer Vision and Pattern Recognition (cs.CV); Machine Learning (cs.LG)
Using ResNet to Utilize 4-class T2-FLAIR Slice Classification Based on the Cholinergic Pathways Hyperintensities Scale for Pathological Aging
Authors: Wei-Chun Kevin Tsai, Yi-Chien Liu, Ming-Chun Yu, Chia-Ju Chou, Sui-Hing Yan, Yang-Teng Fan, Yan-Hsiang Huang, Yen-Ling Chiu, Yi-Fang Chuang, Ran-Zan Wang, Yao-Chia Shih
Abstract: The Cholinergic Pathways Hyperintensities Scale (CHIPS) is a visual rating scale used to assess the extent of cholinergic white matter hyperintensities in T2-FLAIR images, serving as an indicator of dementia severity. However, the manual selection of four specific slices for rating throughout the entire brain is a time-consuming process. Our goal was to develop a deep learning-based model capable of automatically identifying the four slices relevant to CHIPS. To achieve this, we trained a 4-class slice classification model (BSCA) using the ADNI T2-FLAIR dataset (N=150) with the assistance of ResNet. Subsequently, we tested the model's performance on a local dataset (N=30). The results demonstrated the efficacy of our model, with an accuracy of 99.82% and an F1-score of 99.83%. This achievement highlights the potential impact of BSCA as an automatic screening tool, streamlining the selection of four specific T2-FLAIR slices that encompass white matter landmarks along the cholinergic pathways. Clinicians can leverage this tool to assess the risk of clinical dementia development efficiently.
Submitted 11 September, 2024; v1 submitted 9 November, 2023; originally announced November 2023.
Comments: 8 pages, 2 figures, 2 tables
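Structurally, a ResNet-based 4-class slice classifier of this kind reduces to swapping the final fully connected layer for a 4-way head. The sketch below assumes a ResNet-18 backbone from torchvision; the paper's exact backbone depth, preprocessing, and training schedule are not reproduced here.

```python
# Hedged sketch of a ResNet-based 4-class slice classifier in the spirit of BSCA.
import torch.nn as nn
from torchvision import models

def build_slice_classifier(num_classes: int = 4) -> nn.Module:
    model = models.resnet18(weights=None)                      # backbone choice is an assumption
    model.fc = nn.Linear(model.fc.in_features, num_classes)    # 4 CHIPS-relevant slice classes
    return model
```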
arXiv:2311.03285 [pdf, other] (https://arxiv.org/abs/2311.03285)
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI); Distributed, Parallel, and Cluster Computing (cs.DC)
S-LoRA: Serving Thousands of Concurrent LoRA Adapters
Authors: Ying Sheng, Shiyi Cao, Dacheng Li, Coleman Hooper, Nicholas Lee, Shuo Yang, Christopher Chou, Banghua Zhu, Lianmin Zheng, Kurt Keutzer, Joseph E. Gonzalez, Ion Stoica
Abstract: The "pretrain-then-finetune" paradigm is commonly adopted in the deployment of large language models. Low-Rank Adaptation (LoRA), a parameter-efficient fine-tuning method, is often employed to adapt a base model to a multitude of tasks, resulting in a substantial collection of LoRA adapters derived from one base model. We observe that this paradigm presents significant opportunities for batched inference during serving. To capitalize on these opportunities, we present S-LoRA, a system designed for the scalable serving of many LoRA adapters. S-LoRA stores all adapters in the main memory and fetches the adapters used by the currently running queries to the GPU memory. To efficiently use the GPU memory and reduce fragmentation, S-LoRA proposes Unified Paging. Unified Paging uses a unified memory pool to manage dynamic adapter weights with different ranks and KV cache tensors with varying sequence lengths. Additionally, S-LoRA employs a novel tensor parallelism strategy and highly optimized custom CUDA kernels for heterogeneous batching of LoRA computation. Collectively, these features enable S-LoRA to serve thousands of LoRA adapters on a single GPU or across multiple GPUs with a small overhead. Compared to state-of-the-art libraries such as HuggingFace PEFT and vLLM (with naive support of LoRA serving), S-LoRA can improve the throughput by up to 4 times and increase the number of served adapters by several orders of magnitude. As a result, S-LoRA enables scalable serving of many task-specific fine-tuned models and offers the potential for large-scale customized fine-tuning services. The code is available at https://github.com/S-LoRA/S-LoRA
Submitted 5 June, 2024; v1 submitted 6 November, 2023; originally announced November 2023.
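The core batching opportunity the abstract points at is that every request shares the base weights and differs only in a low-rank correction. A plain PyTorch sketch of that idea (not S-LoRA's custom CUDA kernels or paging code) looks like this:

```python
# Minimal sketch: each request in a batch applies its own low-rank delta on top of a
# shared base projection, i.e. W x + B_i A_i x for adapter i.
import torch

def batched_lora_forward(x, W, adapters, adapter_ids):
    # x: (batch, d_in); W: (d_out, d_in); adapters[i] = (A_i: (r, d_in), B_i: (d_out, r)).
    base = x @ W.T                                   # shared base computation for all requests
    out = base.clone()
    for row, aid in enumerate(adapter_ids):          # gather the adapter each request uses
        A, B = adapters[aid]
        out[row] += (x[row] @ A.T) @ B.T             # low-rank, task-specific correction
    return out
```

The per-row Python loop is exactly what the paper's heterogeneous-batching kernels avoid; the sketch only shows the arithmetic being batched.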
arXiv:2310.20539 [pdf, other] (https://arxiv.org/abs/2310.20539)
Subjects: Computational Complexity (cs.CC); Neurons and Cognition (q-bio.NC); Quantum Physics (quant-ph)
The Computational Lens: from Quantum Physics to Neuroscience
Authors: Chi-Ning Chou
Abstract: Two transformative waves of computing have redefined the way we approach science. The first wave came with the birth of the digital computer, which enabled scientists to numerically simulate their models and analyze massive datasets. This technological breakthrough led to the emergence of many sub-disciplines bearing the prefix "computational" in their names. Currently, we are in the midst of the second wave, marked by the remarkable advancements in artificial intelligence. From predicting protein structures to classifying galaxies, the scope of its applications is vast, and there can only be more awaiting us on the horizon. While these two waves influence scientific methodology at the instrumental level, in this dissertation, I will present the computational lens in science, aiming at the conceptual level. Specifically, the central thesis posits that computation serves as a convenient and mechanistic language for understanding and analyzing information processing systems, offering the advantages of composability and modularity. This dissertation begins with an illustration of the blueprint of the computational lens, supported by a review of relevant previous work. Subsequently, I will present my own works in quantum physics and neuroscience as concrete examples. In the concluding chapter, I will contemplate the potential of applying the computational lens across various scientific fields, in a way that can provide significant domain insights, and discuss potential future directions.
Submitted 31 October, 2023; originally announced October 2023.
Comments: PhD thesis, Harvard University, Cambridge, Massachusetts, USA. 2023. Some chapters report joint work

arXiv:2306.17550 [pdf, other] (https://arxiv.org/abs/2306.17550)
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)
TTSWING: a Dataset for Table Tennis Swing Analysis
Authors: Che-Yu Chou, Zheng-Hao Chen, Yung-Hoh Sheu, Hung-Hsuan Chen, Sheng K. Wu
Abstract: We introduce TTSWING, a novel dataset designed for table tennis swing analysis. This dataset comprises comprehensive swing information obtained through 9-axis sensors integrated into custom-made racket grips, accompanied by anonymized demographic data of the players. We detail the data collection and annotation procedures. Furthermore, we conduct pilot studies utilizing diverse machine learning models for swing analysis. TTSWING holds tremendous potential to facilitate innovative research in table tennis analysis and is a valuable resource for the scientific community. We release the dataset and experimental codes at https://github.com/DEPhantom/TTSWING.
Submitted 30 June, 2023; originally announced June 2023.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.17550v1-abstract-full').style.display = 'none'; document.getElementById('2306.17550v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.04879">arXiv:2306.04879</a> <span> [<a href="https://arxiv.org/pdf/2306.04879">pdf</a>, <a href="https://arxiv.org/format/2306.04879">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Augmenting Hessians with Inter-Layer Dependencies for Mixed-Precision Post-Training Quantization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Schaefer%2C+C+J">Clemens JS Schaefer</a>, <a href="/search/cs?searchtype=author&query=Lambert-Shirzad%2C+N">Navid Lambert-Shirzad</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+X">Xiaofan Zhang</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chiachen Chou</a>, <a href="/search/cs?searchtype=author&query=Jablin%2C+T">Tom Jablin</a>, <a href="/search/cs?searchtype=author&query=Li%2C+J">Jian Li</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+E">Elfie Guo</a>, <a href="/search/cs?searchtype=author&query=Stanton%2C+C">Caitlin Stanton</a>, <a href="/search/cs?searchtype=author&query=Joshi%2C+S">Siddharth Joshi</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y+E">Yu Emma Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.04879v1-abstract-short" style="display: inline;"> Efficiently serving neural network models with low latency is becoming more challenging due to increasing model complexity and parameter count. Model quantization offers a solution which simultaneously reduces memory footprint and compute requirements. However, aggressive quantization may lead to an unacceptable loss in model accuracy owing to differences in sensitivity to numerical imperfection a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.04879v1-abstract-full').style.display = 'inline'; document.getElementById('2306.04879v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.04879v1-abstract-full" style="display: none;"> Efficiently serving neural network models with low latency is becoming more challenging due to increasing model complexity and parameter count. Model quantization offers a solution which simultaneously reduces memory footprint and compute requirements. However, aggressive quantization may lead to an unacceptable loss in model accuracy owing to differences in sensitivity to numerical imperfection across different layers in the model. To address this challenge, we propose a mixed-precision post training quantization (PTQ) approach that assigns different numerical precisions to tensors in a network based on their specific needs, for a reduced memory footprint and improved latency while preserving model accuracy. 
Previous works rely on layer-wise Hessian information to determine numerical precision, but as we demonstrate, Hessian estimation is typically insufficient in determining an effective ordering of layer sensitivities. We address this by augmenting the estimated Hessian with additional information to capture inter-layer dependencies. We demonstrate that this consistently improves PTQ performance along the accuracy-latency Pareto frontier across multiple models. Our method combines second-order information and inter-layer dependencies to guide a bisection search, finding quantization configurations within a user-configurable model accuracy degradation range. We evaluate the effectiveness of our method on the ResNet50, MobileNetV2, and BERT models. Our experiments demonstrate latency reductions compared to a 16-bit baseline of $25.48\%$, $21.69\%$, and $33.28\%$ respectively, while maintaining model accuracy to within $99.99\%$ of the baseline model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.04879v1-abstract-full').style.display = 'none'; document.getElementById('2306.04879v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.01382">arXiv:2302.01382</a> <span> [<a href="https://arxiv.org/pdf/2302.01382">pdf</a>, <a href="https://arxiv.org/format/2302.01382">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Mixed Precision Post Training Quantization of Neural Networks with Sensitivity Guided Search </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Schaefer%2C+C+J">Clemens JS Schaefer</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+E">Elfie Guo</a>, <a href="/search/cs?searchtype=author&query=Stanton%2C+C">Caitlin Stanton</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+X">Xiaofan Zhang</a>, <a href="/search/cs?searchtype=author&query=Jablin%2C+T">Tom Jablin</a>, <a href="/search/cs?searchtype=author&query=Lambert-Shirzad%2C+N">Navid Lambert-Shirzad</a>, <a href="/search/cs?searchtype=author&query=Li%2C+J">Jian Li</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chiachen Chou</a>, <a href="/search/cs?searchtype=author&query=Joshi%2C+S">Siddharth Joshi</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y+E">Yu Emma Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.01382v2-abstract-short" style="display: inline;"> Serving large-scale machine learning (ML) models efficiently and with low latency has become challenging owing to increasing model size and complexity. Quantizing models can simultaneously reduce memory and compute requirements, facilitating their widespread access. 
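The "sensitivity ordering plus bisection search under an accuracy budget" pattern described here can be sketched in a few lines. The sketch below is a toy: it assumes accuracy degrades monotonically as more layers are quantized, and `evaluate` is a user-supplied callback, not the paper's augmented-Hessian machinery.

```python
# Toy sketch of sensitivity-guided bisection: rank layers by a sensitivity score, then
# search for how many of the most robust layers can run at low precision while the
# accuracy drop stays within a user-configurable budget.
def bisect_quant_config(layers, sensitivity, evaluate, baseline_acc, max_drop):
    # layers: iterable of layer ids; sensitivity: dict layer -> score (higher = more fragile);
    # evaluate(k): accuracy when the k least sensitive layers are quantized to low precision.
    order = sorted(layers, key=lambda l: sensitivity[l])
    lo, hi = 0, len(order)                  # invariant: quantizing `lo` layers is feasible
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if baseline_acc - evaluate(mid) <= max_drop:
            lo = mid                        # within budget: try quantizing more layers
        else:
            hi = mid - 1                    # too much degradation: back off
    return order[:lo]                       # layer set chosen for low precision
```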
arXiv:2302.01382 [pdf, other] (https://arxiv.org/abs/2302.01382)
Subjects: Machine Learning (cs.LG)
Mixed Precision Post Training Quantization of Neural Networks with Sensitivity Guided Search
Authors: Clemens JS Schaefer, Elfie Guo, Caitlin Stanton, Xiaofan Zhang, Tom Jablin, Navid Lambert-Shirzad, Jian Li, Chiachen Chou, Siddharth Joshi, Yu Emma Wang
Abstract: Serving large-scale machine learning (ML) models efficiently and with low latency has become challenging owing to increasing model size and complexity. Quantizing models can simultaneously reduce memory and compute requirements, facilitating their widespread access. However, for large models not all layers are equally amenable to the same numerical precision, and aggressive quantization can lead to unacceptable loss in model accuracy. One approach to prevent this accuracy degradation is mixed-precision quantization, which allows different tensors to be quantized to varying levels of numerical precision, leveraging the capabilities of modern hardware. Such mixed-precision quantization can more effectively allocate numerical precision to different tensors "as needed" to preserve model accuracy while reducing footprint and compute latency. In this paper, we propose a method to efficiently determine quantization configurations of different tensors in ML models using post-training mixed precision quantization. We analyze three sensitivity metrics and evaluate them for guiding configuration search of two algorithms. We evaluate our method for computer vision and natural language processing and demonstrate latency reductions of up to 27.59% and 34.31% compared to the baseline 16-bit floating point model while guaranteeing no more than 1% accuracy degradation.
Submitted 6 February, 2023; v1 submitted 2 February, 2023; originally announced February 2023.

arXiv:2212.13697 [pdf, other] (https://arxiv.org/abs/2212.13697)
Subjects: Networking and Internet Architecture (cs.NI)
Network Characteristics of LEO Satellite Constellations: A Starlink-Based Measurement from End Users
Authors: Sami Ma, Yi Ching Chou, Haoyuan Zhao, Long Chen, Xiaoqiang Ma, Jiangchuan Liu
Abstract: Low Earth orbit Satellite Networks (LSNs) have been advocated as a key infrastructure for truly global coverage in the forthcoming 6G. This paper presents our initial measurement results and observations on the end-to-end network characteristics of Starlink, arguably the largest LSN constellation to date. Our findings confirm that LSNs are a promising solution towards ubiquitous Internet coverage over the Earth; yet, we also find that the users of Starlink experience much more dynamics in throughput and latency than terrestrial network users, and even frequent outages. Its user experience is heavily affected by environmental factors such as terrain, solar storms, rain, clouds, and temperature, as is the power consumption. We further analyze Starlink's current bent-pipe relay strategy and its limits, particularly for cross-ocean routes. We have also explored its mobility and portability potentials, and extended our experiments from urban cities to wild remote areas that are facing distinct practical and cultural challenges.
Submitted 27 December, 2022; originally announced December 2022.
Comments: 12 pages, 20 figures, to be published in IEEE INFOCOM 2023
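End-user measurements of the kind described for Starlink boil down to repeatedly probing latency and noting outages. The snippet below is only illustrative of that style of probe (a TCP-handshake timer against an arbitrary reference host); the paper's methodology covers far more than this.

```python
# Small sketch of an end-to-end latency probe: time TCP handshakes to a reference
# server and report the median RTT and the fraction of failed attempts.
import socket
import statistics
import time

def tcp_rtt_samples(host="example.com", port=443, n=20, timeout=2.0):
    samples = []
    for _ in range(n):
        start = time.perf_counter()
        try:
            with socket.create_connection((host, port), timeout=timeout):
                samples.append((time.perf_counter() - start) * 1000.0)  # milliseconds
        except OSError:
            samples.append(float("inf"))     # count timeouts/outages explicitly
        time.sleep(0.5)
    finite = [s for s in samples if s != float("inf")]
    return {"median_ms": statistics.median(finite) if finite else None,
            "failure_rate": 1 - len(finite) / n}
```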
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.13697v1-abstract-full').style.display = 'none'; document.getElementById('2212.13697v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 20 figures, to be published in IEEE INFOCOM 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.08763">arXiv:2209.08763</a> <span> [<a href="https://arxiv.org/pdf/2209.08763">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Decentralized Vehicle Coordination: The Berkeley DeepDrive Drone Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wu%2C+F">Fangyu Wu</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+D">Dequan Wang</a>, <a href="/search/cs?searchtype=author&query=Hwang%2C+M">Minjune Hwang</a>, <a href="/search/cs?searchtype=author&query=Hao%2C+C">Chenhui Hao</a>, <a href="/search/cs?searchtype=author&query=Lu%2C+J">Jiawei Lu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+J">Jiamu Zhang</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Christopher Chou</a>, <a href="/search/cs?searchtype=author&query=Darrell%2C+T">Trevor Darrell</a>, <a href="/search/cs?searchtype=author&query=Bayen%2C+A">Alexandre Bayen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.08763v2-abstract-short" style="display: inline;"> Decentralized multiagent planning has been an important field of research in robotics. An interesting and impactful application in the field is decentralized vehicle coordination in understructured road environments. For example, in an intersection, it is useful yet difficult to deconflict multiple vehicles of intersecting paths in absence of a central coordinator. We learn from common sense that,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.08763v2-abstract-full').style.display = 'inline'; document.getElementById('2209.08763v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.08763v2-abstract-full" style="display: none;"> Decentralized multiagent planning has been an important field of research in robotics. An interesting and impactful application in the field is decentralized vehicle coordination in understructured road environments. For example, in an intersection, it is useful yet difficult to deconflict multiple vehicles of intersecting paths in absence of a central coordinator. 
We learn from common sense that, for a vehicle to navigate through such understructured environments, the driver must understand and conform to the implicit "social etiquette" observed by nearby drivers. To study this implicit driving protocol, we collect the Berkeley DeepDrive Drone dataset. The dataset contains 1) a set of aerial videos recording understructured driving, 2) a collection of images and annotations to train vehicle detection models, and 3) a kit of development scripts for illustrating typical usages. We believe that the dataset is of primary interest for studying decentralized multiagent planning employed by human drivers and, of secondary interest, for computer vision in remote sensing settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.08763v2-abstract-full').style.display = 'none'; document.getElementById('2209.08763v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 10 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.02738">arXiv:2207.02738</a> <span> [<a href="https://arxiv.org/pdf/2207.02738">pdf</a>, <a href="https://arxiv.org/format/2207.02738">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Hybrid Approach for Binary Classification of Imbalanced Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tsai%2C+H">Hsin-Han Tsai</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+T">Ta-Wei Yang</a>, <a href="/search/cs?searchtype=author&query=Wong%2C+W">Wai-Man Wong</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Cheng-Fu Chou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.02738v2-abstract-short" style="display: inline;"> Binary classification with an imbalanced dataset is challenging. Models tend to consider all samples as belonging to the majority class. Although existing solutions such as sampling methods, cost-sensitive methods, and ensemble learning methods improve the poor accuracy of the minority class, these methods are limited by overfitting problems or cost parameters that are difficult to decide. We prop… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.02738v2-abstract-full').style.display = 'inline'; document.getElementById('2207.02738v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.02738v2-abstract-full" style="display: none;"> Binary classification with an imbalanced dataset is challenging. Models tend to consider all samples as belonging to the majority class. 
Although existing solutions such as sampling methods, cost-sensitive methods, and ensemble learning methods improve the poor accuracy of the minority class, these methods are limited by overfitting problems or cost parameters that are difficult to decide. We propose HADR, a hybrid approach with dimension reduction that consists of data block construction, dimensionality reduction, and ensemble learning with deep neural network classifiers. We evaluate the performance on eight imbalanced public datasets in terms of recall, G-mean, and AUC. The results show that our model outperforms state-of-the-art methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.02738v2-abstract-full').style.display = 'none'; document.getElementById('2207.02738v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.11016">arXiv:2205.11016</a> <span> [<a href="https://arxiv.org/pdf/2205.11016">pdf</a>, <a href="https://arxiv.org/format/2205.11016">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> MolMiner: You only look once for chemical structure recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Xu%2C+Y">Youjun Xu</a>, <a href="/search/cs?searchtype=author&query=Xiao%2C+J">Jinchuan Xiao</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chia-Han Chou</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+J">Jianhang Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+J">Jintao Zhu</a>, <a href="/search/cs?searchtype=author&query=Hu%2C+Q">Qiwan Hu</a>, <a href="/search/cs?searchtype=author&query=Li%2C+H">Hemin Li</a>, <a href="/search/cs?searchtype=author&query=Han%2C+N">Ningsheng Han</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+B">Bingyu Liu</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+S">Shuaipeng Zhang</a>, <a href="/search/cs?searchtype=author&query=Han%2C+J">Jinyu Han</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Z">Zhen Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+S">Shuhao Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+W">Weilin Zhang</a>, <a href="/search/cs?searchtype=author&query=Lai%2C+L">Luhua Lai</a>, <a href="/search/cs?searchtype=author&query=Pei%2C+J">Jianfeng Pei</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.11016v1-abstract-short" style="display: inline;"> Molecular structures are always depicted as 2D printed form in scientific documents like journal papers and patents. However, these 2D depictions are not machine-readable.
Due to a backlog of decades and an increasing amount of these printed literature, there is a high demand for the translation of printed depictions into machine-readable formats, which is known as Optical Chemical Structure Recog… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.11016v1-abstract-full').style.display = 'inline'; document.getElementById('2205.11016v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.11016v1-abstract-full" style="display: none;"> Molecular structures are always depicted as 2D printed form in scientific documents like journal papers and patents. However, these 2D depictions are not machine-readable. Due to a backlog of decades and an increasing amount of these printed literature, there is a high demand for the translation of printed depictions into machine-readable formats, which is known as Optical Chemical Structure Recognition (OCSR). Most OCSR systems developed over the last three decades follow a rule-based approach where the key step of vectorization of the depiction is based on the interpretation of vectors and nodes as bonds and atoms. Here, we present a practical software MolMiner, which is primarily built up using deep neural networks originally developed for semantic segmentation and object detection to recognize atom and bond elements from documents. These recognized elements can be easily connected as a molecular graph with distance-based construction algorithm. We carefully evaluate our software on four benchmark datasets with the state-of-the-art performance. Various real application scenarios are also tested, yielding satisfactory outcomes. The free download links of Mac and Windows versions are available: Mac: https://molminer-cdn.iipharma.cn/pharma-mind/artifact/latest/mac/PharmaMind-mac-latest-setup.dmg and Windows: https://molminer-cdn.iipharma.cn/pharma-mind/artifact/latest/win/PharmaMind-win-latest-setup.exe <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.11016v1-abstract-full').style.display = 'none'; document.getElementById('2205.11016v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 pages, 4 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.02345">arXiv:2205.02345</a> <span> [<a href="https://arxiv.org/pdf/2205.02345">pdf</a>, <a href="https://arxiv.org/ps/2205.02345">ps</a>, <a href="https://arxiv.org/format/2205.02345">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Sketching Approximability of (Weak) Monarchy Predicates </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Golovnev%2C+A">Alexander Golovnev</a>, <a href="/search/cs?searchtype=author&query=Shahrasbi%2C+A">Amirbehshad Shahrasbi</a>, <a href="/search/cs?searchtype=author&query=Sudan%2C+M">Madhu Sudan</a>, <a href="/search/cs?searchtype=author&query=Velusamy%2C+S">Santhoshini Velusamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.02345v2-abstract-short" style="display: inline;"> We analyze the sketching approximability of constraint satisfaction problems on Boolean domains, where the constraints are balanced linear threshold functions applied to literals. In~particular, we explore the approximability of monarchy-like functions where the value of the function is determined by a weighted combination of the vote of the first variable (the president) and the sum of the votes… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.02345v2-abstract-full').style.display = 'inline'; document.getElementById('2205.02345v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.02345v2-abstract-full" style="display: none;"> We analyze the sketching approximability of constraint satisfaction problems on Boolean domains, where the constraints are balanced linear threshold functions applied to literals. In~particular, we explore the approximability of monarchy-like functions where the value of the function is determined by a weighted combination of the vote of the first variable (the president) and the sum of the votes of all remaining variables. The pure version of this function is when the president can only be overruled by when all remaining variables agree. For every $k \geq 5$, we show that CSPs where the underlying predicate is a pure monarchy function on $k$ variables have no non-trivial sketching approximation algorithm in $o(\sqrt{n})$ space. We also show infinitely many weaker monarchy functions for which CSPs using such constraints are non-trivially approximable by $O(\log(n))$ space sketching algorithms. Moreover, we give the first example of sketching approximable asymmetric Boolean CSPs. Our results work within the framework of Chou, Golovnev, Sudan, and Velusamy (FOCS 2021) that characterizes the sketching approximability of all CSPs. Their framework can be applied naturally to get a computer-aided analysis of the approximability of any specific constraint satisfaction problem. 
The novelty of our work is in using their work to get an analysis that applies to infinitely many problems simultaneously. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.02345v2-abstract-full').style.display = 'none'; document.getElementById('2205.02345v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.05413">arXiv:2202.05413</a> <span> [<a href="https://arxiv.org/pdf/2202.05413">pdf</a>, <a href="https://arxiv.org/format/2202.05413">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Machine-Learning-Aided Visual Analysis Workflow for Investigating Air Pollution Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kuo%2C+Y">Yun-Hsin Kuo</a>, <a href="/search/cs?searchtype=author&query=Fujiwara%2C+T">Takanori Fujiwara</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C+C+-">Charles C. -K. Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+C">Chun-houh Chen</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+K">Kwan-Liu Ma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.05413v1-abstract-short" style="display: inline;"> Analyzing air pollution data is challenging as there are various analysis focuses from different aspects: feature (what), space (where), and time (when). As in most geospatial analysis problems, besides high-dimensional features, the temporal and spatial dependencies of air pollution induce the complexity of performing analysis. Machine learning methods, such as dimensionality reduction, can extra… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.05413v1-abstract-full').style.display = 'inline'; document.getElementById('2202.05413v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.05413v1-abstract-full" style="display: none;"> Analyzing air pollution data is challenging as there are various analysis focuses from different aspects: feature (what), space (where), and time (when). As in most geospatial analysis problems, besides high-dimensional features, the temporal and spatial dependencies of air pollution induce the complexity of performing analysis. Machine learning methods, such as dimensionality reduction, can extract and summarize important information of the data to lift the burden of understanding such a complicated environment. In this paper, we present a methodology that utilizes multiple machine learning methods to uniformly explore these aspects. With this methodology, we develop a visual analytic system that supports a flexible analysis workflow, allowing domain experts to freely explore different aspects based on their analysis needs. 
We demonstrate the capability of our system and analysis workflow supporting a variety of analysis tasks with multiple use cases. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.05413v1-abstract-full').style.display = 'none'; document.getElementById('2202.05413v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in the Proceedings of IEEE PacificVis 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.05189">arXiv:2202.05189</a> <span> [<a href="https://arxiv.org/pdf/2202.05189">pdf</a>, <a href="https://arxiv.org/format/2202.05189">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Understanding Rare Spurious Correlations in Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yang%2C+Y">Yao-Yuan Yang</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Chaudhuri%2C+K">Kamalika Chaudhuri</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.05189v3-abstract-short" style="display: inline;"> Neural networks are known to use spurious correlations such as background information for classification. While prior work has looked at spurious correlations that are widespread in the training data, in this work, we investigate how sensitive neural networks are to rare spurious correlations, which may be harder to detect and correct, and may lead to privacy leaks. We introduce spurious patterns… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.05189v3-abstract-full').style.display = 'inline'; document.getElementById('2202.05189v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.05189v3-abstract-full" style="display: none;"> Neural networks are known to use spurious correlations such as background information for classification. While prior work has looked at spurious correlations that are widespread in the training data, in this work, we investigate how sensitive neural networks are to rare spurious correlations, which may be harder to detect and correct, and may lead to privacy leaks. We introduce spurious patterns correlated with a fixed class to a few training examples and find that it takes only a handful of such examples for the network to learn the correlation. Furthermore, these rare spurious correlations also impact accuracy and privacy. We empirically and theoretically analyze different factors involved in rare spurious correlations and propose mitigation methods accordingly. 
Specifically, we observe that $\ell_2$ regularization and adding Gaussian noise to inputs can reduce the undesirable effects. Code available at https://github.com/yangarbiter/rare-spurious-correlation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.05189v3-abstract-full').style.display = 'none'; document.getElementById('2202.05189v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.14013">arXiv:2112.14013</a> <span> [<a href="https://arxiv.org/pdf/2112.14013">pdf</a>, <a href="https://arxiv.org/format/2112.14013">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Reducing Minor Page Fault Overheads through Enhanced Page Walker </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tirumalasetty%2C+C">Chandrahas Tirumalasetty</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C+C">Chih Chieh Chou</a>, <a href="/search/cs?searchtype=author&query=Reddy%2C+N">Narasimha Reddy</a>, <a href="/search/cs?searchtype=author&query=Gratz%2C+P">Paul Gratz</a>, <a href="/search/cs?searchtype=author&query=Abouelwafa%2C+A">Ayman Abouelwafa</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.14013v2-abstract-short" style="display: inline;"> Application virtual memory footprints are growing rapidly in all systems from servers down to smartphones. To address this growing demand, system integrators are incorporating ever larger amounts of main memory, warranting rethinking of memory management. In current systems, applications produce page fault exceptions whenever they access virtual memory regions which are not backed by a physical pa… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.14013v2-abstract-full').style.display = 'inline'; document.getElementById('2112.14013v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.14013v2-abstract-full" style="display: none;"> Application virtual memory footprints are growing rapidly in all systems from servers down to smartphones. To address this growing demand, system integrators are incorporating ever larger amounts of main memory, warranting rethinking of memory management. In current systems, applications produce page fault exceptions whenever they access virtual memory regions which are not backed by a physical page. As application memory footprints grow, they induce more and more minor faults. Handling of each minor fault can take few 1000's of CPU-cycles and blocks the application till OS finds a free physical frame. These page faults can be detrimental to the performance, when their frequency of occurrence is high and spread across application run-time. 
Specifically, lazy allocation induced minor page faults are increasingly impacting application performance. Our evaluation of several workloads indicates an overhead due to minor faults as high as 29% of execution time. In this paper, we propose to mitigate this problem through a hardware, software co-design approach. Specifically we first propose to parallelize portions of the kernel page allocation to run ahead of fault time in a separate thread. Then we propose the Minor Fault Offload Engine(MFOE), a per-core HW accelerator for minor fault handling. MFOE is equipped with pre-allocated page frame table that it uses to service a page fault. On a page fault, MFOE picks a pre-allocated page frame from this table, makes an entry for it in the TLB, and updates the page table entry to satisfy the page fault. The pre-allocation frame tables are periodically refreshed by a background kernel thread, which also updates the kernel memory management data-structures. We evaluate this system in the gem5 architectural simulator with a modified Linux kernel. Our results show that MFOE improves the average critical-path fault handling latency by 33x. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.14013v2-abstract-full').style.display = 'none'; document.getElementById('2112.14013v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in ACM Transactions on Architecture and Code Optimization (TACO)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.08928">arXiv:2112.08928</a> <span> [<a href="https://arxiv.org/pdf/2112.08928">pdf</a>, <a href="https://arxiv.org/format/2112.08928">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Superconductivity">cond-mat.supr-con</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applied Physics">physics.app-ph</span> </div> </div> <p class="title is-5 mathjax"> A Superconducting Nanowire-based Architecture for Neuromorphic Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lombo%2C+A+E">Andres E. Lombo</a>, <a href="/search/cs?searchtype=author&query=Lares%2C+J+E">Jesus E. Lares</a>, <a href="/search/cs?searchtype=author&query=Castellani%2C+M">Matteo Castellani</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Lynch%2C+N">Nancy Lynch</a>, <a href="/search/cs?searchtype=author&query=Berggren%2C+K+K">Karl K. 
Berggren</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.08928v2-abstract-short" style="display: inline;"> Neuromorphic computing is poised to further the success of software-based neural networks by utilizing improved customized hardware. However, the translation of neuromorphic algorithms to hardware specifications is a problem that has been seldom explored. Building superconducting neuromorphic systems requires extensive expertise in both superconducting physics and theoretical neuroscience. In this… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.08928v2-abstract-full').style.display = 'inline'; document.getElementById('2112.08928v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.08928v2-abstract-full" style="display: none;"> Neuromorphic computing is poised to further the success of software-based neural networks by utilizing improved customized hardware. However, the translation of neuromorphic algorithms to hardware specifications is a problem that has been seldom explored. Building superconducting neuromorphic systems requires extensive expertise in both superconducting physics and theoretical neuroscience. In this work, we aim to bridge this gap by presenting a tool and methodology to translate algorithmic parameters into circuit specifications. We first show the correspondence between theoretical neuroscience models and the dynamics of our circuit topologies. We then apply this tool to solve linear systems by implementing a spiking neural network with our superconducting nanowire-based hardware. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.08928v2-abstract-full').style.display = 'none'; document.getElementById('2112.08928v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">29 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.01657">arXiv:2112.01657</a> <span> [<a href="https://arxiv.org/pdf/2112.01657">pdf</a>, <a href="https://arxiv.org/format/2112.01657">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistical Mechanics">cond-mat.stat-mech</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1103/PRXQuantum.5.010334">10.1103/PRXQuantum.5.010334 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Limitations of Linear Cross-Entropy as a Measure for Quantum Advantage </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gao%2C+X">Xun Gao</a>, <a href="/search/cs?searchtype=author&query=Kalinowski%2C+M">Marcin Kalinowski</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Lukin%2C+M+D">Mikhail D. Lukin</a>, <a href="/search/cs?searchtype=author&query=Barak%2C+B">Boaz Barak</a>, <a href="/search/cs?searchtype=author&query=Choi%2C+S">Soonwon Choi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.01657v1-abstract-short" style="display: inline;"> Demonstrating quantum advantage requires experimental implementation of a computational task that is hard to achieve using state-of-the-art classical systems. One approach is to perform sampling from a probability distribution associated with a class of highly entangled many-body wavefunctions. It has been suggested that this approach can be certified with the Linear Cross-Entropy Benchmark (XEB).… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.01657v1-abstract-full').style.display = 'inline'; document.getElementById('2112.01657v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.01657v1-abstract-full" style="display: none;"> Demonstrating quantum advantage requires experimental implementation of a computational task that is hard to achieve using state-of-the-art classical systems. One approach is to perform sampling from a probability distribution associated with a class of highly entangled many-body wavefunctions. It has been suggested that this approach can be certified with the Linear Cross-Entropy Benchmark (XEB). We critically examine this notion. First, in a "benign" setting where an honest implementation of noisy quantum circuits is assumed, we characterize the conditions under which the XEB approximates the fidelity. 
Second, in an "adversarial" setting where all possible classical algorithms are considered for comparison, we show that achieving relatively high XEB values does not imply faithful simulation of quantum dynamics. We present an efficient classical algorithm that, with 1 GPU within 2s, yields high XEB values, namely 2-12% of those obtained in experiments. By identifying and exploiting several vulnerabilities of the XEB, we achieve high XEB values without full simulation of quantum circuits. Remarkably, our algorithm features better scaling with the system size than noisy quantum devices for commonly studied random circuit ensembles. To quantitatively explain the success of our algorithm and the limitations of the XEB, we use a theoretical framework in which the average XEB and fidelity are mapped to statistical models. We illustrate the relation between the XEB and the fidelity for quantum circuits in various architectures, with different gate choices, and in the presence of noise. Our results show that XEB's utility as a proxy for fidelity hinges on several conditions, which must be checked in the benign setting but cannot be assumed in the adversarial setting. Thus, the XEB alone has limited utility as a benchmark for quantum advantage. We discuss ways to overcome these limitations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.01657v1-abstract-full').style.display = 'none'; document.getElementById('2112.01657v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25+33 pages, 13+16 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> MIT-CTP/5321 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.05280">arXiv:2110.05280</a> <span> [<a href="https://arxiv.org/pdf/2110.05280">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Multi-institutional Validation of Two-Streamed Deep Learning Method for Automated Delineation of Esophageal Gross Tumor Volume using planning-CT and FDG-PETCT </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ye%2C+X">Xianghua Ye</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+D">Dazhou Guo</a>, <a href="/search/cs?searchtype=author&query=Tseng%2C+C">Chen-kan Tseng</a>, <a href="/search/cs?searchtype=author&query=Ge%2C+J">Jia Ge</a>, <a href="/search/cs?searchtype=author&query=Hung%2C+T">Tsung-Min Hung</a>, <a href="/search/cs?searchtype=author&query=Pai%2C+P">Ping-Ching Pai</a>, <a href="/search/cs?searchtype=author&query=Ren%2C+Y">Yanping Ren</a>, <a href="/search/cs?searchtype=author&query=Zheng%2C+L">Lu Zheng</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+X">Xinli Zhu</a>, <a href="/search/cs?searchtype=author&query=Peng%2C+L">Ling Peng</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+Y">Ying Chen</a>, <a 
href="/search/cs?searchtype=author&query=Chen%2C+X">Xiaohua Chen</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chen-Yu Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+D">Danni Chen</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+J">Jiaze Yu</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+Y">Yuzhen Chen</a>, <a href="/search/cs?searchtype=author&query=Jiao%2C+F">Feiran Jiao</a>, <a href="/search/cs?searchtype=author&query=Xin%2C+Y">Yi Xin</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+L">Lingyun Huang</a>, <a href="/search/cs?searchtype=author&query=Xie%2C+G">Guotong Xie</a>, <a href="/search/cs?searchtype=author&query=Xiao%2C+J">Jing Xiao</a>, <a href="/search/cs?searchtype=author&query=Lu%2C+L">Le Lu</a>, <a href="/search/cs?searchtype=author&query=Yan%2C+S">Senxiang Yan</a>, <a href="/search/cs?searchtype=author&query=Jin%2C+D">Dakai Jin</a>, <a href="/search/cs?searchtype=author&query=Ho%2C+T">Tsung-Ying Ho</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.05280v1-abstract-short" style="display: inline;"> Background: The current clinical workflow for esophageal gross tumor volume (GTV) contouring relies on manual delineation of high labor-costs and interuser variability. Purpose: To validate the clinical applicability of a deep learning (DL) multi-modality esophageal GTV contouring model, developed at 1 institution whereas tested at multiple ones. Methods and Materials: We collected 606 esophageal… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.05280v1-abstract-full').style.display = 'inline'; document.getElementById('2110.05280v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.05280v1-abstract-full" style="display: none;"> Background: The current clinical workflow for esophageal gross tumor volume (GTV) contouring relies on manual delineation of high labor-costs and interuser variability. Purpose: To validate the clinical applicability of a deep learning (DL) multi-modality esophageal GTV contouring model, developed at 1 institution whereas tested at multiple ones. Methods and Materials: We collected 606 esophageal cancer patients from four institutions. 252 institution-1 patients had a treatment planning-CT (pCT) and a pair of diagnostic FDG-PETCT; 354 patients from other 3 institutions had only pCT. A two-streamed DL model for GTV segmentation was developed using pCT and PETCT scans of a 148 patient institution-1 subset. This built model had the flexibility of segmenting GTVs via only pCT or pCT+PETCT combined. For independent evaluation, the rest 104 institution-1 patients behaved as unseen internal testing, and 354 institutions 2-4 patients were used for external testing. We evaluated manual revision degrees by human experts to assess the contour-editing effort. The performance of the deep model was compared against 4 radiation oncologists in a multiuser study with 20 random external patients. Contouring accuracy and time were recorded for the pre-and post-DL assisted delineation process. Results: Our model achieved high segmentation accuracy in internal testing (mean Dice score: 0.81 using pCT and 0.83 using pCT+PET) and generalized well to external evaluation (mean DSC: 0.80). Expert assessment showed that the predicted contours of 88% patients need only minor or no revision. 
In multi-user evaluation, with the assistance of a deep model, inter-observer variation and required contouring time were reduced by 37.6% and 48.0%, respectively. Conclusions: Deep learning predicted GTV contours were in close agreement with the ground truth and could be adopted clinically with mostly minor or no changes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.05280v1-abstract-full').style.display = 'none'; document.getElementById('2110.05280v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">36 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.01444">arXiv:2110.01444</a> <span> [<a href="https://arxiv.org/pdf/2110.01444">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Numerical Analysis">math.NA</span> </div> </div> <p class="title is-5 mathjax"> Crashworthiness design of 3D lattice-structure filled thin-walled tubes based on data mining </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lv%2C+J">Jiyuan Lv</a>, <a href="/search/cs?searchtype=author&query=Bai%2C+Z">Zhonghao Bai</a>, <a href="/search/cs?searchtype=author&query=Du%2C+X">Xianping Du</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+F">Feng Zhu</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C+C">Clifford C. Chou</a>, <a href="/search/cs?searchtype=author&query=Jiang%2C+B">Binhui Jiang</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+S">Shiwei Xu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.01444v1-abstract-short" style="display: inline;"> Lattice structures and thin-walled tubes are two types of energy-absorbers widely studied and applied in engineering practice. In this study, a new type of lattice-structure filled thin-walled tube (LFT) was proposed. In this new type of LFT, a BCC-Z lattice structure was filled into a square thin-walled tube. Then using data mining, a 3-D geometric design with five design variables was conducted… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01444v1-abstract-full').style.display = 'inline'; document.getElementById('2110.01444v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.01444v1-abstract-full" style="display: none;"> Lattice structures and thin-walled tubes are two types of energy-absorbers widely studied and applied in engineering practice. In this study, a new type of lattice-structure filled thin-walled tube (LFT) was proposed. In this new type of LFT, a BCC-Z lattice structure was filled into a square thin-walled tube. 
Then using data mining, a 3-D geometric design with five design variables was conducted on this new LFT. Using the Latin Hypercube sampling algorithm, 150 design cases were generated. Numerical models were then developed to simulate their crush behavior, and the simulation dataset was used for data mining. The results showed that (1) Filling the BCC-Z lattice structure into a thin-walled tube can significantly improve the energy absorption (EA) capacity of the structure. (2) The decision trees generated in the data mining process indicated that the rod diameter d of lattice structure is the key design variable that has most significant impact on EA, followed by m and n. (3) The design rules to build LFTs with high EA efficiency (SEA>=16 kJ/kg and CFE>=45%), high total EA (SEA>=16 kJ/kg and EA>=6 kJ), and lightweight (SEA>=16 kJ/kg and Mass<=0.45 kg) were obtained from decision trees. The ideal configurations of LFT corresponding to these three objectives are: d>2 mm, n>2 and m>3 for high EA efficiency; d>2 mm, n>2 and m>3 for high total EA; and d>2 mm, n>2, m<=4 and t<=1.7 mm for lightweight. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01444v1-abstract-full').style.display = 'none'; document.getElementById('2110.01444v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.03048">arXiv:2109.03048</a> <span> [<a href="https://arxiv.org/pdf/2109.03048">pdf</a>, <a href="https://arxiv.org/format/2109.03048">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Early ICU Mortality Prediction and Survival Analysis for Respiratory Failure </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yin%2C+Y">Yilin Yin</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chun-An Chou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.03048v1-abstract-short" style="display: inline;"> Respiratory failure is one of the major causes of death in the critical care unit. During the outbreak of COVID-19, critical care units experienced an extreme shortage of mechanical ventilation because of respiratory failure related syndromes. To help this, the early mortality risk prediction in patients who suffer respiratory failure can provide timely support for clinical treatment and resource mana… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.03048v1-abstract-full').style.display = 'inline'; document.getElementById('2109.03048v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.03048v1-abstract-full" style="display: none;"> Respiratory failure is one of the major causes of death in the critical care unit. During the outbreak of COVID-19, critical care units experienced an extreme shortage of mechanical ventilation because of respiratory failure related syndromes.
To help this, the early mortality risk prediction in patients who suffer respiratory failure can provide timely support for clinical treatment and resource management. In the study, we propose a dynamic modeling approach for early mortality risk prediction of the respiratory failure patients based on the first 24 hours ICU physiological data. Our proposed model is validated on the eICU collaborate database. We achieved a high AUROC performance (80-83%) and significantly improved AUCPR 4% on Day 5 since ICU admission, compared to the state-of-art prediction models. In addition, we illustrated that the survival curve includes the time-varying information for the early ICU admission survival analysis. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.03048v1-abstract-full').style.display = 'none'; document.getElementById('2109.03048v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.01773">arXiv:2109.01773</a> <span> [<a href="https://arxiv.org/pdf/2109.01773">pdf</a>, <a href="https://arxiv.org/format/2109.01773">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> MLCTR: A Fast Scalable Coupled Tensor Completion Based on Multi-Layer Non-Linear Matrix Factorization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Uddin%2C+A">Ajim Uddin</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+D">Dan Zhou</a>, <a href="/search/cs?searchtype=author&query=Tao%2C+X">Xinyuan Tao</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chia-Ching Chou</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+D">Dantong Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.01773v1-abstract-short" style="display: inline;"> Firms earning prediction plays a vital role in investment decisions, dividends expectation, and share price. It often involves multiple tensor-compatible datasets with non-linear multi-way relationships, spatiotemporal structures, and different levels of sparsity. Current non-linear tensor completion algorithms tend to learn noisy embedding and incur overfitting. This paper focuses on the embeddin… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.01773v1-abstract-full').style.display = 'inline'; document.getElementById('2109.01773v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.01773v1-abstract-full" style="display: none;"> Firms earning prediction plays a vital role in investment decisions, dividends expectation, and share price. It often involves multiple tensor-compatible datasets with non-linear multi-way relationships, spatiotemporal structures, and different levels of sparsity. Current non-linear tensor completion algorithms tend to learn noisy embedding and incur overfitting. 
This paper focuses on the embedding learning aspect of the tensor completion problem and proposes a new multi-layer neural network architecture for tensor factorization and completion (MLCTR). The network architecture entails multiple advantages: a series of low-rank matrix factorizations (MF) building blocks to minimize overfitting, interleaved transfer functions in each layer for non-linearity, and by-pass connections to reduce the gradient diminishing problem and increase the depths of neural networks. Furthermore, the model employs Stochastic Gradient Descent(SGD) based optimization for fast convergence in training. Our algorithm is highly efficient for imputing missing values in the EPS data. Experiments confirm that our strategy of incorporating non-linearity in factor matrices demonstrates impressive performance in embedding learning and end-to-end tensor models, and outperforms approaches with non-linearity in the phase of reconstructing tensors from factor matrices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.01773v1-abstract-full').style.display = 'none'; document.getElementById('2109.01773v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.09996">arXiv:2108.09996</a> <span> [<a href="https://arxiv.org/pdf/2108.09996">pdf</a>, <a href="https://arxiv.org/format/2108.09996">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MS-DARTS: Mean-Shift Based Differentiable Architecture Search </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hsieh%2C+J">Jun-Wei Hsieh</a>, <a href="/search/cs?searchtype=author&query=Chang%2C+M">Ming-Ching Chang</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+P">Ping-Yang Chen</a>, <a href="/search/cs?searchtype=author&query=Santra%2C+S">Santanu Santra</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Cheng-Han Chou</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+C">Chih-Sheng Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.09996v4-abstract-short" style="display: inline;"> Differentiable Architecture Search (DARTS) is an effective continuous relaxation-based network architecture search (NAS) method with low search cost. It has attracted significant attentions in Auto-ML research and becomes one of the most useful paradigms in NAS. 
Although DARTS can produce superior efficiency over traditional NAS approaches with better control of complex parameters, oftentimes it s… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.09996v4-abstract-full').style.display = 'inline'; document.getElementById('2108.09996v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.09996v4-abstract-full" style="display: none;"> Differentiable Architecture Search (DARTS) is an effective continuous relaxation-based network architecture search (NAS) method with low search cost. It has attracted significant attentions in Auto-ML research and becomes one of the most useful paradigms in NAS. Although DARTS can produce superior efficiency over traditional NAS approaches with better control of complex parameters, oftentimes it suffers from stabilization issues in producing deteriorating architectures when discretizing the continuous architecture. We observed considerable loss of validity causing dramatic decline in performance at this final discretization step of DARTS. To address this issue, we propose a Mean-Shift based DARTS (MS-DARTS) to improve stability based on sampling and perturbation. Our approach can improve both the stability and accuracy of DARTS, by smoothing the loss landscape and sampling architecture parameters within a suitable bandwidth. We investigate the convergence of our mean-shift approach, together with the effects of bandwidth selection that affects stability and accuracy. Evaluations performed on CIFAR-10, CIFAR-100, and ImageNet show that MS-DARTS achieves higher performance over other state-of-the-art NAS methods with reduced search cost. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.09996v4-abstract-full').style.display = 'none'; document.getElementById('2108.09996v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.06049">arXiv:2108.06049</a> <span> [<a href="https://arxiv.org/pdf/2108.06049">pdf</a>, <a href="https://arxiv.org/format/2108.06049">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Limitations of Local Quantum Algorithms on Random Max-k-XOR and Beyond </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Love%2C+P+J">Peter J.
Love</a>, <a href="/search/cs?searchtype=author&query=Sandhu%2C+J+S">Juspreet Singh Sandhu</a>, <a href="/search/cs?searchtype=author&query=Shi%2C+J">Jonathan Shi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.06049v3-abstract-short" style="display: inline;"> We introduce a notion of \emph{generic local algorithm} which strictly generalizes existing frameworks of local algorithms such as \emph{factors of i.i.d.} by capturing local \emph{quantum} algorithms such as the Quantum Approximate Optimization Algorithm (QAOA). Motivated by a question of Farhi et al. [arXiv:1910.08187, 2019] we then show limitations of generic local algorithms including QAOA o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.06049v3-abstract-full').style.display = 'inline'; document.getElementById('2108.06049v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.06049v3-abstract-full" style="display: none;"> We introduce a notion of \emph{generic local algorithm} which strictly generalizes existing frameworks of local algorithms such as \emph{factors of i.i.d.} by capturing local \emph{quantum} algorithms such as the Quantum Approximate Optimization Algorithm (QAOA). Motivated by a question of Farhi et al. [arXiv:1910.08187, 2019] we then show limitations of generic local algorithms including QAOA on random instances of constraint satisfaction problems (CSPs). Specifically, we show that any generic local algorithm whose assignment to a vertex depends only on a local neighborhood with $o(n)$ other vertices (such as the QAOA at depth less than $ε\log(n)$) cannot arbitrarily-well approximate boolean CSPs if the problem satisfies a geometric property from statistical physics called the coupled overlap-gap property (OGP) [Chen et al., Annals of Probability, 47(3), 2019]. We show that the random MAX-k-XOR problem has this property when $k\geq4$ is even by extending the corresponding result for diluted $k$-spin glasses. Our concentration lemmas confirm a conjecture of Brandao et al. [arXiv:1812.04170, 2018] asserting that the landscape independence of QAOA extends to logarithmic depth -- in other words, for every fixed choice of QAOA angle parameters, the algorithm at logarithmic depth performs almost equally well on almost all instances. One of these concentration lemmas is a strengthening of McDiarmid's inequality, applicable when the random variables have a highly biased distribution, and may be of independent interest. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.06049v3-abstract-full').style.display = 'none'; document.getElementById('2108.06049v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">59 pages, 2 figures.
Third version has an updated abstract, an introduction with a more complete literature review, and open questions, as well as a fix to some typos in Section-5 and Section-6. The second version was updated with a new proof that demonstrated a coupled OGP for Random Max-k-XOR (signed)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.03171">arXiv:2108.03171</a> <span> [<a href="https://arxiv.org/pdf/2108.03171">pdf</a>, <a href="https://arxiv.org/ps/2108.03171">ps</a>, <a href="https://arxiv.org/format/2108.03171">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Quantum Meets the Minimum Circuit Size Problem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chia%2C+N">Nai-Hui Chia</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+J">Jiayu Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+R">Ruizhe Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.03171v3-abstract-short" style="display: inline;"> In this work, we initiate the study of the Minimum Circuit Size Problem (MCSP) in the quantum setting. MCSP is a problem to compute the circuit complexity of Boolean functions. It is a fascinating problem in complexity theory -- its hardness is mysterious, and a better understanding of its hardness can have surprising implications to many fields in computer science. We first define and investiga… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.03171v3-abstract-full').style.display = 'inline'; document.getElementById('2108.03171v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.03171v3-abstract-full" style="display: none;"> In this work, we initiate the study of the Minimum Circuit Size Problem (MCSP) in the quantum setting. MCSP is a problem to compute the circuit complexity of Boolean functions. It is a fascinating problem in complexity theory -- its hardness is mysterious, and a better understanding of its hardness can have surprising implications to many fields in computer science. We first define and investigate the basic complexity-theoretic properties of minimum quantum circuit size problems for three natural objects: Boolean functions, unitaries, and quantum states. We show that these problems are not trivially in NP but in QCMA (or have QCMA protocols). Next, we explore the relations between the three quantum MCSPs and their variants. We discover that some reductions that are not known for classical MCSP exist for quantum MCSPs for unitaries and states, e.g., search-to-decision reduction and self-reduction. 
Finally, we systematically generalize results known for classical MCSP to the quantum setting (including quantum cryptography, quantum learning theory, quantum circuit lower bounds, and quantum fine-grained complexity) and also find new connections to tomography and quantum gravity. Due to the fundamental differences between classical and quantum circuits, most of our results require extra care and reveal properties and phenomena unique to the quantum setting. Our findings could be of interest for future studies, and we pose several open problems for further exploration along this direction. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.13078">arXiv:2106.13078</a> <span> [<a href="https://arxiv.org/pdf/2106.13078">pdf</a>, <a href="https://arxiv.org/format/2106.13078">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> Linear Space Streaming Lower Bounds for Approximating CSPs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Golovnev%2C+A">Alexander Golovnev</a>, <a href="/search/cs?searchtype=author&query=Sudan%2C+M">Madhu Sudan</a>, <a href="/search/cs?searchtype=author&query=Velingker%2C+A">Ameya Velingker</a>, <a href="/search/cs?searchtype=author&query=Velusamy%2C+S">Santhoshini Velusamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2106.13078v2-abstract-full"> We consider the approximability of constraint satisfaction problems in the streaming setting.
For every constraint satisfaction problem (CSP) on $n$ variables taking values in $\{0,\ldots,q-1\}$, we prove that improving over the trivial approximability by a factor of $q$ requires $\Omega(n)$ space even on instances with $O(n)$ constraints. We also identify a broad subclass of problems for which any improvement over the trivial approximability requires $\Omega(n)$ space. The key technical core is an optimal, $q^{-(k-1)}$-inapproximability for the \textsf{Max $k$-LIN}-$\bmod\; q$ problem, which is the Max CSP problem where every constraint is given by a system of $k-1$ linear equations $\bmod\; q$ over $k$ variables. Our work builds on and extends the breakthrough work of Kapralov and Krachun (Proc. STOC 2019), who showed a linear lower bound on any non-trivial approximation of the MaxCut problem in graphs. MaxCut corresponds roughly to the case of \textsf{Max $k$-LIN}-$\bmod\; q$ with ${k=q=2}$. For general CSPs in the streaming setting, prior results only yielded $\Omega(\sqrt{n})$ space bounds. In particular, no linear space lower bound was known for an approximation factor less than $1/2$ for {\em any} CSP. Extending the work of Kapralov and Krachun to \textsf{Max $k$-LIN}-$\bmod\; q$ with $k>2$ and $q>2$ (while getting optimal hardness results) is the main technical contribution of this work. Each of these extensions presents non-trivial technical challenges that we overcome in this work. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p>
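<p class="is-size-7 mathjax">Editorial sketch (not code from the paper): the "trivial approximability" baseline above can be seen concretely. Each Max $k$-LIN mod $q$ constraint is a consistent system of $k-1$ independent linear equations mod $q$ over $k$ variables, so a uniformly random assignment satisfies it with probability $q^{-(k-1)}$. The chained-equation constraint generator below is an illustrative choice, not the instance distribution used in the paper.</p> <pre><code># Minimal sketch: a uniformly random assignment satisfies about a
# q^-(k-1) fraction of Max k-LIN mod q constraints.  Each constraint
# chains k variables via x[v_{j+1}] - x[v_j] = b_j (mod q), an
# illustrative ensemble (not necessarily the paper's).
import random

def make_instance(n, m, k, q, rng):
    cons = []
    for _ in range(m):
        vs = rng.sample(range(n), k)               # k distinct variables
        bs = [rng.randrange(q) for _ in range(k - 1)]
        cons.append((vs, bs))
    return cons

def satisfied_fraction(cons, x, q):
    ok = 0
    for vs, bs in cons:
        if all((x[vs[j + 1]] - x[vs[j]]) % q == bs[j] for j in range(len(bs))):
            ok += 1
    return ok / len(cons)

rng = random.Random(0)
n, m, k, q = 2000, 20000, 3, 5
cons = make_instance(n, m, k, q, rng)
x = [rng.randrange(q) for _ in range(n)]           # uniformly random assignment
print(satisfied_fraction(cons, x), q ** -(k - 1))  # both close to 0.04
</code></pre>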
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.01161">arXiv:2105.01161</a> <span> [<a href="https://arxiv.org/pdf/2105.01161">pdf</a>, <a href="https://arxiv.org/format/2105.01161">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Sketching approximability of all finite CSPs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Golovnev%2C+A">Alexander Golovnev</a>, <a href="/search/cs?searchtype=author&query=Sudan%2C+M">Madhu Sudan</a>, <a href="/search/cs?searchtype=author&query=Velusamy%2C+S">Santhoshini Velusamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2105.01161v5-abstract-full"> A constraint satisfaction problem (CSP), $\textsf{Max-CSP}(\mathcal{F})$, is specified by a finite set of constraints $\mathcal{F} \subseteq \{[q]^k \to \{0,1\}\}$ for positive integers $q$ and $k$. An instance of the problem on $n$ variables is given by $m$ applications of constraints from $\mathcal{F}$ to subsequences of the $n$ variables, and the goal is to find an assignment to the variables that satisfies the maximum number of constraints. In the $(\gamma,\beta)$-approximation version of the problem for parameters $0 \leq \beta < \gamma \leq 1$, the goal is to distinguish instances where at least $\gamma$ fraction of the constraints can be satisfied from instances where at most $\beta$ fraction of the constraints can be satisfied. In this work we consider the approximability of this problem in the context of sketching algorithms and give a dichotomy result. Specifically, for every family $\mathcal{F}$ and every $\beta < \gamma$, we show that either a linear sketching algorithm solves the problem in polylogarithmic space, or the problem is not solvable by any sketching algorithm in $o(\sqrt{n})$ space. In particular, we give non-trivial approximation algorithms using polylogarithmic space for infinitely many constraint satisfaction problems. We also extend previously known lower bounds for general streaming algorithms to a wide variety of problems, and in particular the case of $q=k=2$, where we get a dichotomy, and the case when the satisfying assignments of the constraints of $\mathcal{F}$ support a distribution on $[q]^k$ with uniform marginals. Prior to this work, other than sporadic examples, the only systematic classes of CSPs that were analyzed considered the setting of Boolean variables $q=2$, binary constraints $k=2$, singleton families $|\mathcal{F}|=1$ and only considered the setting where constraints are placed on literals rather than variables. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p>
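<p class="is-size-7 mathjax">Editorial sketch of the objects defined above (illustrative code, not from the paper): a family $\mathcal{F}$ of predicates $[q]^k \to \{0,1\}$, an instance given by applications of predicates from $\mathcal{F}$ to $k$-tuples of the $n$ variables, and the value of an assignment as the fraction of satisfied constraints; the $(\gamma,\beta)$ task is then to distinguish value at least $\gamma$ from value at most $\beta$.</p> <pre><code># Minimal sketch of Max-CSP(F): an instance is a list of (predicate, variable
# indices) pairs; the value of an assignment is the satisfied fraction.
from typing import Callable, List, Sequence, Tuple

Constraint = Tuple[Callable[[Sequence[int]], int], Tuple[int, ...]]

def value(instance: List[Constraint], assignment: Sequence[int]) -> float:
    """Fraction of constraints satisfied by the assignment (a number in [0, 1])."""
    sat = sum(f(tuple(assignment[i] for i in idx)) for f, idx in instance)
    return sat / len(instance)

# Example with q = 2, k = 2 and F = {OR}: three constraints on four variables.
f_or = lambda z: int(z[0] == 1 or z[1] == 1)
instance = [(f_or, (0, 1)), (f_or, (1, 2)), (f_or, (2, 3))]
print(value(instance, [1, 0, 0, 1]))   # 2/3 of the constraints are satisfied
</code></pre>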
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Updated version to appear in JACM. arXiv admin note: text overlap with arXiv:2102.12351</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.04492">arXiv:2104.04492</a> <span> [<a href="https://arxiv.org/pdf/2104.04492">pdf</a>, <a href="https://arxiv.org/format/2104.04492">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ACCESS.2023.3243068">10.1109/ACCESS.2023.3243068 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Joint QoS-Aware Scheduling and Precoding for Massive MIMO Systems via Deep Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+C">Chih-Wei Huang</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+Y">Yen-Cheng Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+H">Hong-Yunn Chen</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Cheng-Fu Chou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2104.04492v1-abstract-full"> The rapid development of mobile networks proliferates the demand for high-data-rate, low-latency, and high-reliability applications in fifth-generation (5G) and beyond (B5G) mobile networks. Concurrently, massive multiple-input multiple-output (MIMO) technology is essential to realize this vision and requires coordination with resource management functions for a high user experience. Though conventional cross-layer adaptation algorithms have been developed to schedule and allocate network resources, the complexity of the resulting rules is high under diverse quality of service (QoS) requirements and B5G features. In this work, we consider a joint user scheduling, antenna allocation, and precoding problem in a massive MIMO system. Instead of directly assigning resources, such as the number of antennas, the allocation process is transformed into a deep reinforcement learning (DRL) based dynamic algorithm selection problem for efficient Markov decision process (MDP) modeling and policy training.
Specifically, the proposed utility function integrates QoS requirements and constraints toward a long-term, system-wide objective that matches the MDP return. The componentized action structure with action embedding further incorporates the resource management process into the model. Simulations show 7.2% and 12.5% more satisfied users than static algorithm selection and related works, respectively, under demanding scenarios. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Access, vol. 11, pp. 13243-13256, 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.12351">arXiv:2102.12351</a> <span> [<a href="https://arxiv.org/pdf/2102.12351">pdf</a>, <a href="https://arxiv.org/format/2102.12351">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> Approximability of all Boolean CSPs with linear sketches </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Golovnev%2C+A">Alexander Golovnev</a>, <a href="/search/cs?searchtype=author&query=Sudan%2C+M">Madhu Sudan</a>, <a href="/search/cs?searchtype=author&query=Velusamy%2C+S">Santhoshini Velusamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2102.12351v8-abstract-full"> In this work we consider the approximability of $\textsf{Max-CSP}(f)$ in the context of sketching algorithms and completely characterize the approximability of all Boolean CSPs. Specifically, given $f$, $\gamma$, and $\beta$, we show that either (1) the $(\gamma,\beta)$-approximation version of $\textsf{Max-CSP}(f)$ has a linear sketching algorithm using $O(\log n)$ space, or (2) for every $\varepsilon > 0$ the $(\gamma-\varepsilon,\beta+\varepsilon)$-approximation version of $\textsf{Max-CSP}(f)$ requires $\Omega(\sqrt{n})$ space for any sketching algorithm.
We also prove lower bounds against streaming algorithms for several CSPs. In particular, we recover the streaming dichotomy of [CGV20] for $k=2$ and show streaming approximation resistance of all CSPs for which $f^{-1}(1)$ supports a distribution with uniform marginals. Our positive results show the wider applicability of bias-based algorithms used previously by [GVV17] and [CGV20] by giving a systematic way to discover biases. Our negative results combine the Fourier analytic methods of [KKS15], which we extend to a wider class of CSPs, with a rich collection of reductions among communication complexity problems that lie at the heart of our lower bounds. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.03503">arXiv:2102.03503</a> <span> [<a href="https://arxiv.org/pdf/2102.03503">pdf</a>, <a href="https://arxiv.org/format/2102.03503">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Template-Free Try-on Image Synthesis via Semantic-guided Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chien-Lung Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+C">Chieh-Yun Chen</a>, <a href="/search/cs?searchtype=author&query=Hsieh%2C+C">Chia-Wei Hsieh</a>, <a href="/search/cs?searchtype=author&query=Shuai%2C+H">Hong-Han Shuai</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+J">Jiaying Liu</a>, <a href="/search/cs?searchtype=author&query=Cheng%2C+W">Wen-Huang Cheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2102.03503v1-abstract-full"> The virtual try-on task has drawn considerable attention in the field of computer vision. However, presenting three-dimensional (3D) physical characteristics (e.g., pleats and shadows) based on a 2D image is very challenging.
Although there have been several previous studies on 2D-based virtual try-on work, most 1) required user-specified target poses that are not user-friendly and may not be the best for the target clothing, and 2) failed to address some problematic cases, including facial details, clothing wrinkles, and body occlusions. To address these two challenges, in this paper, we propose an innovative template-free try-on image synthesis (TF-TIS) network. The TF-TIS first synthesizes the target pose according to the user-specified in-shop clothing. Afterward, given an in-shop clothing image, a user image, and a synthesized pose, we propose a novel model for synthesizing a human try-on image with the target clothing in the best-fitting pose. The qualitative and quantitative experiments both indicate that the proposed TF-TIS outperforms the state-of-the-art methods, especially for difficult cases. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by IEEE TNNLS (2021). 14 pages including 2 pages of references</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.11203">arXiv:2008.11203</a> <span> [<a href="https://arxiv.org/pdf/2008.11203">pdf</a>, <a href="https://arxiv.org/format/2008.11203">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Learning to Learn in a Semi-Supervised Fashion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+Y">Yun-Chun Chen</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chao-Te Chou</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y+F">Yu-Chiang Frank Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2008.11203v1-abstract-full">
To address semi-supervised learning from both labeled and unlabeled data, we present a novel meta-learning scheme. We particularly consider that labeled and unlabeled data share disjoint ground truth label sets, as can be seen in tasks like person re-identification or image retrieval. Our learning scheme exploits the idea of leveraging information from labeled to unlabeled data. Instead of fitting the associated class-wise similarity scores as most meta-learning algorithms do, we propose to derive semantics-oriented similarity representations from labeled data and transfer such representations to unlabeled ones. Thus, our strategy can be viewed as a self-supervised learning scheme, which can be applied to fully supervised learning tasks for improved performance. Our experiments on various tasks and settings confirm the effectiveness of our proposed approach and its superiority over the state-of-the-art methods. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ECCV 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.06171">arXiv:2006.06171</a> <span> [<a href="https://arxiv.org/pdf/2006.06171">pdf</a>, <a href="https://arxiv.org/format/2006.06171">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A General Framework for Analyzing Stochastic Dynamics in Learning Algorithms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Sandhu%2C+J+S">Juspreet Singh Sandhu</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+M+B">Mien Brabeeba Wang</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+T">Tiancheng Yu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2006.06171v3-abstract-full">
One of the challenges in analyzing learning algorithms is the circular entanglement between the objective value and the stochastic noise. This is also known as the "chicken and egg" phenomenon, and traditionally there has been no principled way to tackle this issue. People solve the problem by utilizing the special structure of the dynamics, and hence the analyses are difficult to generalize. In this work, we present a streamlined three-step recipe to tackle the "chicken and egg" problem and give a general framework for analyzing stochastic dynamics in learning algorithms. Our framework composes standard techniques from probability theory, such as stopping times and martingale concentration. We demonstrate the power and flexibility of our framework by giving a unifying analysis for three very different learning problems with last-iterate and strong uniform high-probability convergence guarantees. The problems are stochastic gradient descent for strongly convex functions, streaming principal component analysis, and linear bandits with stochastic gradient descent updates. We either improve or match the state-of-the-art bounds on all three dynamics. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p>
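<p class="is-size-7 mathjax">Editorial illustration (not the paper's framework): the first of the three dynamics listed above, stochastic gradient descent on a strongly convex function, can be simulated in a few lines to see the last-iterate behavior being analyzed. The quadratic objective, the $1/(\mu t)$ step size, and the Gaussian gradient noise are illustrative assumptions, not choices taken from the paper.</p> <pre><code># Minimal sketch: SGD on f(w) = 0.5 * mu * w**2 with additive Gaussian
# gradient noise, tracking the last iterate.  Step size and noise model
# are illustrative assumptions.
import random

def sgd_last_iterate(mu=1.0, noise=1.0, steps=100_000, seed=0):
    rng = random.Random(seed)
    w = 10.0                                          # scalar for simplicity
    for t in range(steps):
        grad = mu * w + noise * rng.gauss(0.0, 1.0)   # stochastic gradient
        w -= grad / (mu * (t + 1))                    # O(1/t) step size
    return w

print(abs(sgd_last_iterate()))   # typically below 0.01 for these settings
</code></pre>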
</li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.02421">arXiv:2005.02421</a> <span> [<a href="https://arxiv.org/pdf/2005.02421">pdf</a>, <a href="https://arxiv.org/ps/2005.02421">ps</a>, <a href="https://arxiv.org/format/2005.02421">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Spoofing Linear Cross-Entropy Benchmarking in Shallow Quantum Circuits </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barak%2C+B">Boaz Barak</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+X">Xun Gao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2005.02421v1-abstract-full"> The linear cross-entropy benchmark (Linear XEB) has been used as a test for procedures simulating quantum circuits. Given a quantum circuit $C$ with $n$ inputs and outputs and a purported simulator whose output is distributed according to a distribution $p$ over $\{0,1\}^n$, the linear XEB fidelity of the simulator is $\mathcal{F}_{C}(p) = 2^n \mathbb{E}_{x \sim p} q_C(x) -1$, where $q_C(x)$ is the probability that $x$ is output from the distribution $C|0^n\rangle$. A trivial simulator (e.g., the uniform distribution) satisfies $\mathcal{F}_C(p)=0$, while Google's noisy quantum simulation of a 53-qubit circuit $C$ achieved a fidelity value of $(2.24\pm0.21)\times10^{-3}$ (Arute et al., Nature'19). In this work we give a classical randomized algorithm that for a given circuit $C$ of depth $d$ with Haar random 2-qubit gates achieves in expectation a fidelity value of $\Omega(\tfrac{n}{L} \cdot 15^{-d})$ in running time $\textsf{poly}(n,2^L)$. Here $L$ is the size of the \emph{light cone} of $C$: the maximum number of input bits that each output bit depends on. In particular, we obtain a polynomial-time algorithm that achieves a large fidelity of $\omega(1)$ for depth $O(\sqrt{\log n})$ two-dimensional circuits. To our knowledge, this is the first such result for two-dimensional circuits of super-constant depth. Our results can be considered as evidence that fooling the linear XEB test might be easier than achieving a full simulation of the quantum circuit. </span> </p>
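<p class="is-size-7 mathjax">Editorial sketch of the fidelity formula quoted above, $\mathcal{F}_{C}(p) = 2^n \mathbb{E}_{x \sim p}\, q_C(x) - 1$ (illustrative code, not from the paper): the toy 2-qubit "ideal" distribution below stands in for the true output probabilities $q_C$ of a circuit, which a real benchmark would compute by simulation.</p> <pre><code># Minimal sketch of the linear XEB estimator: F = 2^n * mean(q_C(x)) - 1
# over samples x from the purported simulator.  q_C here is a made-up
# 2-qubit distribution, not the output of a real circuit.
import random

n = 2
q_C = {"00": 0.50, "01": 0.25, "10": 0.15, "11": 0.10}   # toy ideal distribution

def xeb_fidelity(samples):
    return (2 ** n) * sum(q_C[x] for x in samples) / len(samples) - 1

rng = random.Random(0)
strings = list(q_C)

# A trivial simulator outputting uniformly random strings scores about 0.
uniform_samples = [rng.choice(strings) for _ in range(200_000)]
print(round(xeb_fidelity(uniform_samples), 3))           # close to 0.0

# Sampling from q_C itself scores 2^n * sum(q^2) - 1 = 0.38 for this toy q_C.
ideal_samples = rng.choices(strings, weights=[q_C[s] for s in strings], k=200_000)
print(round(xeb_fidelity(ideal_samples), 3))             # close to 0.38
</code></pre>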
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.11796">arXiv:2004.11796</a> <span> [<a href="https://arxiv.org/pdf/2004.11796">pdf</a>, <a href="https://arxiv.org/format/2004.11796">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Optimal Streaming Approximations for all Boolean Max-2CSPs and Max-kSAT </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chou%2C+C">Chi-Ning Chou</a>, <a href="/search/cs?searchtype=author&query=Golovnev%2C+A">Alexander Golovnev</a>, <a href="/search/cs?searchtype=author&query=Velusamy%2C+S">Santhoshini Velusamy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2004.11796v4-abstract-full"> We prove tight upper and lower bounds on approximation ratios of all Boolean Max-2CSP problems in the streaming model. Specifically, for every type of Max-2CSP problem, we give an explicit constant $\alpha$ such that for any $\varepsilon>0$ (i) there is an $(\alpha-\varepsilon)$-streaming approximation using space $O(\log{n})$; and (ii) any $(\alpha+\varepsilon)$-streaming approximation requires space $\Omega(\sqrt{n})$. This generalizes the celebrated work of [Kapralov, Khanna, Sudan SODA 2015; Kapralov, Krachun STOC 2019], who showed that the optimal approximation ratio for Max-CUT was $1/2$. Prior to this work, the problem of determining this ratio was open for all other Max-2CSPs. Our results are quite surprising for some specific Max-2CSPs. For the Max-DCUT problem, there was a gap between an upper bound of $1/2$ and a lower bound of $2/5$ [Guruswami, Velingker, Velusamy APPROX 2017]. We show that neither of these bounds is tight, and the optimal ratio for Max-DCUT is $4/9$. We also establish that the tight approximation for Max-2SAT is $\sqrt{2}/2$, and for Exact Max-2SAT it is $3/4$. As a byproduct, our result gives a separation between space-efficient approximations for Max-2SAT and Exact Max-2SAT.
This is in sharp contrast to the setting of polynomial-time algorithms with polynomial space, where the two problems are known to be equally hard to approximate. Finally, we prove that the tight streaming approximation for Max-$k$SAT is $\sqrt{2}/2$ for every $k\geq2$. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Full version of the conference version appearing in FOCS 2020. Fixes an error in the algorithm for Max-kSAT</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2001.01395">arXiv:2001.01395</a> <span> [<a href="https://arxiv.org/pdf/2001.01395">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Accumulated Polar Feature-based Deep Learning for Efficient and Lightweight Automatic Modulation Classification with Channel Compensation Mechanism </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Teng%2C+C">Chieh-Fang Teng</a>, <a href="/search/cs?searchtype=author&query=Chou%2C+C">Ching-Yao Chou</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+C">Chun-Hsiang Chen</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+A">An-Yeu Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2001.01395v2-abstract-full"> In next-generation communications, massive machine-type communications (mMTC) induce a severe burden on base stations. To address such an issue, automatic modulation classification (AMC) can help to reduce signaling overhead by blindly recognizing the modulation types without handshaking. Thus, it plays an important role in future intelligent modems.
The emerging deep learning (DL) technique stores intelligence in the network, resulting in superior performance over traditional approaches. However, conventional DL-based approaches suffer from heavy training overhead, memory overhead, and computational complexity, which severely hinder practical applications in resource-limited scenarios, such as Vehicle-to-Everything (V2X) applications. Furthermore, the overhead of online retraining under time-varying fading channels has not been studied in the prior art. In this work, an accumulated polar feature-based DL with a channel compensation mechanism is proposed to cope with the aforementioned issues. Firstly, the simulation results show that learning features from the polar domain with historical data information can approach near-optimal performance while reducing training overhead by 99.8 times. Secondly, the proposed neural network-based channel estimator (NN-CE) can learn the channel response and compensate for the distorted channel with 13% improvement. Moreover, in applying this lightweight NN-CE in a time-varying fading channel, two efficient mechanisms of online retraining are proposed, which can reduce transmission overhead and retraining overhead by 90% and 76%, respectively. Finally, the performance of the proposed approach is evaluated and compared with the prior art on a public dataset to demonstrate its efficiency and light weight. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 January, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2020. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 13 figures, 8 tables</span> </p> </li> </ol> </div> </main> </body> </html>