<!-- extraction artifact (pre-DOCTYPE residue from page capture):
     "CINXE.COM"
     "Search | arXiv e-print repository"
     Preserved as a comment so the document begins with the doctype. -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;43 of 43 results for author: <span class="mathjax">Lim, S H</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Lim%2C+S+H">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Lim, S H"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Lim%2C+S+H&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Lim, S H"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.21256">arXiv:2410.21256</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.21256">pdf</a>, <a href="https://arxiv.org/format/2410.21256">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Multi-modal AI for comprehensive breast cancer prognostication </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Witowski%2C+J">Jan Witowski</a>, <a href="/search/cs?searchtype=author&amp;query=Zeng%2C+K">Ken Zeng</a>, <a href="/search/cs?searchtype=author&amp;query=Cappadona%2C+J">Joseph Cappadona</a>, <a href="/search/cs?searchtype=author&amp;query=Elayoubi%2C+J">Jailan Elayoubi</a>, <a href="/search/cs?searchtype=author&amp;query=Chiru%2C+E+D">Elena Diana Chiru</a>, <a href="/search/cs?searchtype=author&amp;query=Chan%2C+N">Nancy Chan</a>, <a 
href="/search/cs?searchtype=author&amp;query=Kang%2C+Y">Young-Joon Kang</a>, <a href="/search/cs?searchtype=author&amp;query=Howard%2C+F">Frederick Howard</a>, <a href="/search/cs?searchtype=author&amp;query=Ostrovnaya%2C+I">Irina Ostrovnaya</a>, <a href="/search/cs?searchtype=author&amp;query=Fernandez-Granda%2C+C">Carlos Fernandez-Granda</a>, <a href="/search/cs?searchtype=author&amp;query=Schnabel%2C+F">Freya Schnabel</a>, <a href="/search/cs?searchtype=author&amp;query=Ozerdem%2C+U">Ugur Ozerdem</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+K">Kangning Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Steinsnyder%2C+Z">Zoe Steinsnyder</a>, <a href="/search/cs?searchtype=author&amp;query=Thakore%2C+N">Nitya Thakore</a>, <a href="/search/cs?searchtype=author&amp;query=Sadic%2C+M">Mohammad Sadic</a>, <a href="/search/cs?searchtype=author&amp;query=Yeung%2C+F">Frank Yeung</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+E">Elisa Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Hill%2C+T">Theodore Hill</a>, <a href="/search/cs?searchtype=author&amp;query=Swett%2C+B">Benjamin Swett</a>, <a href="/search/cs?searchtype=author&amp;query=Rigau%2C+D">Danielle Rigau</a>, <a href="/search/cs?searchtype=author&amp;query=Clayburn%2C+A">Andrew Clayburn</a>, <a href="/search/cs?searchtype=author&amp;query=Speirs%2C+V">Valerie Speirs</a>, <a href="/search/cs?searchtype=author&amp;query=Vetter%2C+M">Marcus Vetter</a>, <a href="/search/cs?searchtype=author&amp;query=Sojak%2C+L">Lina Sojak</a> , et al. (26 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.21256v1-abstract-short" style="display: inline;"> Treatment selection in breast cancer is guided by molecular subtypes and clinical characteristics. Recurrence risk assessment plays a crucial role in personalizing treatment. 
Current methods, including genomic assays, have limited accuracy and clinical utility, leading to suboptimal decisions for many patients. We developed a test for breast cancer patient stratification based on digital pathology&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21256v1-abstract-full').style.display = 'inline'; document.getElementById('2410.21256v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.21256v1-abstract-full" style="display: none;"> Treatment selection in breast cancer is guided by molecular subtypes and clinical characteristics. Recurrence risk assessment plays a crucial role in personalizing treatment. Current methods, including genomic assays, have limited accuracy and clinical utility, leading to suboptimal decisions for many patients. We developed a test for breast cancer patient stratification based on digital pathology and clinical characteristics using novel AI methods. Specifically, we utilized a vision transformer-based pan-cancer foundation model trained with self-supervised learning to extract features from digitized H&amp;E-stained slides. These features were integrated with clinical data to form a multi-modal AI test predicting cancer recurrence and death. The test was developed and evaluated using data from a total of 8,161 breast cancer patients across 15 cohorts originating from seven countries. Of these, 3,502 patients from five cohorts were used exclusively for evaluation, while the remaining patients were used for training. Our test accurately predicted our primary endpoint, disease-free interval, in the five external cohorts (C-index: 0.71 [0.68-0.75], HR: 3.63 [3.02-4.37, p&lt;0.01]). In a direct comparison (N=858), the AI test was more accurate than Oncotype DX, the standard-of-care 21-gene assay, with a C-index of 0.67 [0.61-0.74] versus 0.61 [0.49-0.73], respectively. 
Additionally, the AI test added independent information to Oncotype DX in a multivariate analysis (HR: 3.11 [1.91-5.09, p&lt;0.01)]). The test demonstrated robust accuracy across all major breast cancer subtypes, including TNBC (C-index: 0.71 [0.62-0.81], HR: 3.81 [2.35-6.17, p=0.02]), where no diagnostic tools are currently recommended by clinical guidelines. These results suggest that our AI test can improve accuracy, extend applicability to a wider range of patients, and enhance access to treatment selection tools. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.21256v1-abstract-full').style.display = 'none'; document.getElementById('2410.21256v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.03229">arXiv:2410.03229</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.03229">pdf</a>, <a href="https://arxiv.org/format/2410.03229">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Elucidating the Design Choice of Probability Paths in Flow Matching for Forecasting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yijin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+A">Annan Yu</a>, <a 
href="/search/cs?searchtype=author&amp;query=Hart%2C+E">Emma Hart</a>, <a href="/search/cs?searchtype=author&amp;query=Mahoney%2C+M+W">Michael W. Mahoney</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+X+S">Xiaoye S. Li</a>, <a href="/search/cs?searchtype=author&amp;query=Erichson%2C+N+B">N. Benjamin Erichson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.03229v1-abstract-short" style="display: inline;"> Flow matching has recently emerged as a powerful paradigm for generative modeling and has been extended to probabilistic time series forecasting in latent spaces. However, the impact of the specific choice of probability path model on forecasting performance remains under-explored. In this work, we demonstrate that forecasting spatio-temporal data with flow matching is highly sensitive to the sele&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03229v1-abstract-full').style.display = 'inline'; document.getElementById('2410.03229v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.03229v1-abstract-full" style="display: none;"> Flow matching has recently emerged as a powerful paradigm for generative modeling and has been extended to probabilistic time series forecasting in latent spaces. However, the impact of the specific choice of probability path model on forecasting performance remains under-explored. In this work, we demonstrate that forecasting spatio-temporal data with flow matching is highly sensitive to the selection of the probability path model. Motivated by this insight, we propose a novel probability path model designed to improve forecasting performance. 
Our empirical results across various dynamical system benchmarks show that our model achieves faster convergence during training and improved predictive performance compared to existing probability path models. Importantly, our approach is efficient during inference, requiring only a few sampling steps. This makes our proposed model practical for real-world applications and opens new avenues for probabilistic forecasting. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03229v1-abstract-full').style.display = 'none'; document.getElementById('2410.03229v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.02035">arXiv:2410.02035</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.02035">pdf</a>, <a href="https://arxiv.org/format/2410.02035">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Tuning Frequency Bias of State Space Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yu%2C+A">Annan Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+D">Dongwei Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a>, <a 
href="/search/cs?searchtype=author&amp;query=Mahoney%2C+M+W">Michael W. Mahoney</a>, <a href="/search/cs?searchtype=author&amp;query=Erichson%2C+N+B">N. Benjamin Erichson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.02035v1-abstract-short" style="display: inline;"> State space models (SSMs) leverage linear, time-invariant (LTI) systems to effectively learn sequences with long-range dependencies. By analyzing the transfer functions of LTI systems, we find that SSMs exhibit an implicit bias toward capturing low-frequency components more effectively than high-frequency ones. This behavior aligns with the broader notion of frequency bias in deep learning model t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02035v1-abstract-full').style.display = 'inline'; document.getElementById('2410.02035v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.02035v1-abstract-full" style="display: none;"> State space models (SSMs) leverage linear, time-invariant (LTI) systems to effectively learn sequences with long-range dependencies. By analyzing the transfer functions of LTI systems, we find that SSMs exhibit an implicit bias toward capturing low-frequency components more effectively than high-frequency ones. This behavior aligns with the broader notion of frequency bias in deep learning model training. We show that the initialization of an SSM assigns it an innate frequency bias and that training the model in a conventional way does not alter this bias. 
Based on our theory, we propose two mechanisms to tune frequency bias: either by scaling the initialization to tune the inborn frequency bias; or by applying a Sobolev-norm-based filter to adjust the sensitivity of the gradients to high-frequency inputs, which allows us to change the frequency bias via training. Using an image-denoising task, we empirically show that we can strengthen, weaken, or even reverse the frequency bias using both mechanisms. By tuning the frequency bias, we can also improve SSMs&#39; performance on learning long-range sequences, averaging an 88.26% accuracy on the Long-Range Arena (LRA) benchmark tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02035v1-abstract-full').style.display = 'none'; document.getElementById('2410.02035v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.17006">arXiv:2408.17006</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.17006">pdf</a>, <a href="https://arxiv.org/format/2408.17006">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Retrieval-Augmented Natural Language Reasoning for Explainable Visual Question Answering </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Su Hyeon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+M">Minkuk Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+H+B">Hyeon Bae Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+S+T">Seong Tae Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.17006v1-abstract-short" style="display: inline;"> Visual Question Answering with Natural Language Explanation (VQA-NLE) task is challenging due to its high demand for reasoning-based inference. Recent VQA-NLE studies focus on enhancing model networks to amplify the model&#39;s reasoning capability but this approach is resource-consuming and unstable. 
In this work, we introduce a new VQA-NLE model, ReRe (Retrieval-augmented natural language Reasoning)&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.17006v1-abstract-full').style.display = 'inline'; document.getElementById('2408.17006v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.17006v1-abstract-full" style="display: none;"> Visual Question Answering with Natural Language Explanation (VQA-NLE) task is challenging due to its high demand for reasoning-based inference. Recent VQA-NLE studies focus on enhancing model networks to amplify the model&#39;s reasoning capability but this approach is resource-consuming and unstable. In this work, we introduce a new VQA-NLE model, ReRe (Retrieval-augmented natural language Reasoning), using leverage retrieval information from the memory to aid in generating accurate answers and persuasive explanations without relying on complex networks and extra datasets. ReRe is an encoder-decoder architecture model using a pre-trained clip vision encoder and a pre-trained GPT-2 language model as a decoder. Cross-attention layers are added in the GPT-2 for processing retrieval features. ReRe outperforms previous methods in VQA accuracy and explanation score and shows improvement in NLE with more persuasive, reliability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.17006v1-abstract-full').style.display = 'none'; document.getElementById('2408.17006v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICIP Workshop 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.13562">arXiv:2402.13562</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.13562">pdf</a>, <a href="https://arxiv.org/format/2402.13562">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Analysis of Multi-Source Language Training in Cross-Lingual Transfer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Seong Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Yun%2C+T">Taejun Yun</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+J">Jinhyeon Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Choi%2C+J">Jihun Choi</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+T">Taeuk Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.13562v2-abstract-short" style="display: inline;"> The successful adaptation of multilingual language models (LMs) to a specific language-task pair critically depends on the availability of data tailored for that condition. While cross-lingual transfer (XLT) methods have contributed to addressing this data scarcity problem, there still exists ongoing debate about the mechanisms behind their effectiveness. 
In this work, we focus on one of promising&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13562v2-abstract-full').style.display = 'inline'; document.getElementById('2402.13562v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.13562v2-abstract-full" style="display: none;"> The successful adaptation of multilingual language models (LMs) to a specific language-task pair critically depends on the availability of data tailored for that condition. While cross-lingual transfer (XLT) methods have contributed to addressing this data scarcity problem, there still exists ongoing debate about the mechanisms behind their effectiveness. In this work, we focus on one of promising assumptions about inner workings of XLT, that it encourages multilingual LMs to place greater emphasis on language-agnostic or task-specific features. We test this hypothesis by examining how the patterns of XLT change with a varying number of source languages involved in the process. Our experimental findings show that the use of multiple source languages in XLT-a technique we term Multi-Source Language Training (MSLT)-leads to increased mingling of embedding spaces for different languages, supporting the claim that XLT benefits from making use of language-independent information. On the other hand, we discover that using an arbitrary combination of source languages does not always guarantee better performance. We suggest simple heuristics for identifying effective language combinations for MSLT and empirically prove its effectiveness. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13562v2-abstract-full').style.display = 'none'; document.getElementById('2402.13562v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ACL 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.12499">arXiv:2401.12499</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.12499">pdf</a>, <a href="https://arxiv.org/ps/2401.12499">ps</a>, <a href="https://arxiv.org/format/2401.12499">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> On the Fundamental Tradeoff of Joint Communication and Quickest Change Detection with State-Independent Data Channels </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Seo%2C+D">Daewon Seo</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.12499v2-abstract-short" style="display: inline;"> In this work, we take the initiative in studying the 
information-theoretic tradeoff between communication and quickest change detection (QCD) under an integrated sensing and communication setting. We formally establish a joint communication and sensing problem for the quickest change detection. We assume a broadcast channel with a transmitter, a communication receiver, and a QCD detector in which&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.12499v2-abstract-full').style.display = 'inline'; document.getElementById('2401.12499v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.12499v2-abstract-full" style="display: none;"> In this work, we take the initiative in studying the information-theoretic tradeoff between communication and quickest change detection (QCD) under an integrated sensing and communication setting. We formally establish a joint communication and sensing problem for the quickest change detection. We assume a broadcast channel with a transmitter, a communication receiver, and a QCD detector in which only the detection channel is state dependent. For the problem setting, by utilizing constant subblock-composition codes and a modified CuSum detection rule, which we call subblock CuSum (SCS), we provide an inner bound on the information-theoretic tradeoff between communication rate and change point detection delay in the asymptotic regime of vanishing false alarm rate. We further provide a partial converse that matches our inner bound for a certain class of codes. This implies that the SCS detection strategy is asymptotically optimal for our codes as the false alarm rate constraint vanishes. We also present some canonical examples of the tradeoff region for a binary channel, a scalar Gaussian channel, and a MIMO Gaussian channel. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.12499v2-abstract-full').style.display = 'none'; document.getElementById('2401.12499v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.17166">arXiv:2310.17166</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.17166">pdf</a>, <a href="https://arxiv.org/format/2310.17166">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> X-SNS: Cross-Lingual Transfer Prediction through Sub-Network Similarity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yun%2C+T">Taejun Yun</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+J">Jinhyeon Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Kang%2C+D">Deokyeong Kang</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Seong Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+J">Jihoon Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+T">Taeuk Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.17166v1-abstract-short" style="display: inline;"> Cross-lingual transfer (XLT) is an emergent ability of multilingual language models that preserves their performance on a task 
to a significant extent when evaluated in languages that were not included in the fine-tuning process. While English, due to its widespread usage, is typically regarded as the primary language for model adaption in various tasks, recent studies have revealed that the effic&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17166v1-abstract-full').style.display = 'inline'; document.getElementById('2310.17166v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.17166v1-abstract-full" style="display: none;"> Cross-lingual transfer (XLT) is an emergent ability of multilingual language models that preserves their performance on a task to a significant extent when evaluated in languages that were not included in the fine-tuning process. While English, due to its widespread usage, is typically regarded as the primary language for model adaption in various tasks, recent studies have revealed that the efficacy of XLT can be amplified by selecting the most appropriate source languages based on specific conditions. In this work, we propose the utilization of sub-network similarity between two languages as a proxy for predicting the compatibility of the languages in the context of XLT. Our approach is model-oriented, better reflecting the inner workings of foundation models. In addition, it requires only a moderate amount of raw text from candidate languages, distinguishing it from the majority of previous methods that rely on external resources. In experiments, we demonstrate that our method is more effective than baselines across diverse tasks. Specifically, it shows proficiency in ranking candidates for zero-shot XLT, achieving an improvement of 4.6% on average in terms of NDCG@3. We also provide extensive analyses that confirm the utility of sub-networks for XLT prediction. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17166v1-abstract-full').style.display = 'none'; document.getElementById('2310.17166v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to EMNLP 2023 (Findings)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.02537">arXiv:2310.02537</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.02537">pdf</a>, <a href="https://arxiv.org/format/2310.02537">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> A Context-Aware CEO Problem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Seo%2C+D">Daewon Seo</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Yongjune Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.02537v1-abstract-short" style="display: inline;"> In many sensor network applications, a fusion center often has additional valuable information, such as context data, which cannot be obtained directly from the sensors. Motivated by this, we study a generalized CEO problem where a CEO has access to context information. 
The main contribution of this work is twofold. Firstly, we characterize the asymptotically optimal error exponent per rate as the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02537v1-abstract-full').style.display = 'inline'; document.getElementById('2310.02537v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.02537v1-abstract-full" style="display: none;"> In many sensor network applications, a fusion center often has additional valuable information, such as context data, which cannot be obtained directly from the sensors. Motivated by this, we study a generalized CEO problem where a CEO has access to context information. The main contribution of this work is twofold. Firstly, we characterize the asymptotically optimal error exponent per rate as the number of sensors and sum rate grow without bound. The proof extends the Berger-Tung coding scheme and the converse argument by Berger et al. (1996) taking into account context information. The resulting expression includes the minimum Chernoff divergence over context information. Secondly, assuming that the sizes of the source and context alphabets are respectively $|\mathcal{X}|$ and $|\mathcal{S}|$, we prove that it is asymptotically optimal to partition all sensors into at most $\binom{|\mathcal{X}|}{2} |\mathcal{S}|$ groups and have the sensors in each group adopt the same encoding scheme. Our problem subsumes the original CEO problem by Berger et al. (1996) as a special case if there is only one letter for context information; in this case, our result tightens its required number of groups from $\binom{|\mathcal{X}|}{2}+2$ to $\binom{|\mathcal{X}|}{2}$. We also numerically demonstrate the effect of context information for a simple Gaussian scenario. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02537v1-abstract-full').style.display = 'none'; document.getElementById('2310.02537v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.00228">arXiv:2212.00228</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2212.00228">pdf</a>, <a href="https://arxiv.org/format/2212.00228">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Gated Recurrent Neural Networks with Weighted Time-Delay Feedback </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Erichson%2C+N+B">N. Benjamin Erichson</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Mahoney%2C+M+W">Michael W. Mahoney</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.00228v1-abstract-short" style="display: inline;"> We introduce a novel gated recurrent unit (GRU) with a weighted time-delay feedback mechanism in order to improve the modeling of long-term dependencies in sequential data. 
This model is a discretized version of a continuous-time formulation of a recurrent unit, where the dynamics are governed by delay differential equations (DDEs). By considering a suitable time-discretization scheme, we propose&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.00228v1-abstract-full').style.display = 'inline'; document.getElementById('2212.00228v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.00228v1-abstract-full" style="display: none;"> We introduce a novel gated recurrent unit (GRU) with a weighted time-delay feedback mechanism in order to improve the modeling of long-term dependencies in sequential data. This model is a discretized version of a continuous-time formulation of a recurrent unit, where the dynamics are governed by delay differential equations (DDEs). By considering a suitable time-discretization scheme, we propose $τ$-GRU, a discrete-time gated recurrent unit with delay. We prove the existence and uniqueness of solutions for the continuous-time model, and we demonstrate that the proposed feedback mechanism can help improve the modeling of long-term dependencies. Our empirical results show that $τ$-GRU can converge faster and generalize better than state-of-the-art recurrent units and gated recurrent architectures on a range of tasks, including time-series classification, human activity recognition, and speech recognition. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.00228v1-abstract-full').style.display = 'none'; document.getElementById('2212.00228v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.11361">arXiv:2205.11361</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.11361">pdf</a>, <a href="https://arxiv.org/format/2205.11361">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> Chaotic Regularization and Heavy-Tailed Limits for Deterministic Gradient Descent </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Wan%2C+Y">Yijun Wan</a>, <a href="/search/cs?searchtype=author&amp;query=%C5%9Eim%C5%9Fekli%2C+U">Umut Şimşekli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.11361v2-abstract-short" style="display: inline;"> Recent studies have shown that gradient descent (GD) can achieve improved generalization when its dynamics exhibits a chaotic behavior. However, to obtain the desired effect, the step-size should be chosen sufficiently large, a task which is problem dependent and can be difficult in practice. 
In this study, we incorporate a chaotic component to GD in a controlled manner, and introduce multiscale p&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.11361v2-abstract-full').style.display = 'inline'; document.getElementById('2205.11361v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.11361v2-abstract-full" style="display: none;"> Recent studies have shown that gradient descent (GD) can achieve improved generalization when its dynamics exhibits a chaotic behavior. However, to obtain the desired effect, the step-size should be chosen sufficiently large, a task which is problem dependent and can be difficult in practice. In this study, we incorporate a chaotic component to GD in a controlled manner, and introduce multiscale perturbed GD (MPGD), a novel optimization framework where the GD recursion is augmented with chaotic perturbations that evolve via an independent dynamical system. We analyze MPGD from three different angles: (i) By building up on recent advances in rough paths theory, we show that, under appropriate assumptions, as the step-size decreases, the MPGD recursion converges weakly to a stochastic differential equation (SDE) driven by a heavy-tailed Lévy-stable process. (ii) By making connections to recently developed generalization bounds for heavy-tailed processes, we derive a generalization bound for the limiting SDE and relate the worst-case generalization error over the trajectories of the process to the parameters of MPGD. (iii) We analyze the implicit regularization effect brought by the dynamical regularization and show that, in the weak perturbation regime, MPGD introduces terms that penalize the Hessian of the loss function. Empirical results are provided to demonstrate the advantages of MPGD. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.11361v2-abstract-full').style.display = 'none'; document.getElementById('2205.11361v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, accepted at NeurIPS 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.13436">arXiv:2202.13436</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.13436">pdf</a>, <a href="https://arxiv.org/format/2202.13436">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Neural-Progressive Hedging: Enforcing Constraints in Reinforcement Learning with Stochastic Programming </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghosh%2C+S">Supriyo Ghosh</a>, <a href="/search/cs?searchtype=author&amp;query=Wynter%2C+L">Laura Wynter</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hong Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+D+T">Duc Thien Nguyen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark 
mathjax" id="2202.13436v1-abstract-short" style="display: inline;"> We propose a framework, called neural-progressive hedging (NP), that leverages stochastic programming during the online phase of executing a reinforcement learning (RL) policy. The goal is to ensure feasibility with respect to constraints and risk-based objectives such as conditional value-at-risk (CVaR) during the execution of the policy, using probabilistic models of the state transitions to gui&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.13436v1-abstract-full').style.display = 'inline'; document.getElementById('2202.13436v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.13436v1-abstract-full" style="display: none;"> We propose a framework, called neural-progressive hedging (NP), that leverages stochastic programming during the online phase of executing a reinforcement learning (RL) policy. The goal is to ensure feasibility with respect to constraints and risk-based objectives such as conditional value-at-risk (CVaR) during the execution of the policy, using probabilistic models of the state transitions to guide policy adjustments. The framework is particularly amenable to the class of sequential resource allocation problems since feasibility with respect to typical resource constraints cannot be enforced in a scalable manner. The NP framework provides an alternative that adds modest overhead during the online phase. Experimental results demonstrate the efficacy of the NP framework on two continuous real-world tasks: (i) the portfolio optimization problem with liquidity constraints for financial planning, characterized by non-stationary state distributions; and (ii) the dynamic repositioning problem in bike sharing systems, that embodies the class of supply-demand matching problems. 
We show that the NP framework produces policies that are better than deep RL and other baseline approaches, adapting to non-stationarity, whilst satisfying structural constraints and accommodating risk measures in the resulting policies. Additional benefits of the NP framework are ease of implementation and better explainability of the policies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.13436v1-abstract-full').style.display = 'none'; document.getElementById('2202.13436v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.01972">arXiv:2202.01972</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.01972">pdf</a>, <a href="https://arxiv.org/ps/2202.01972">ps</a>, <a href="https://arxiv.org/format/2202.01972">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Hybrid Neural Coded Modulation: Design and Training Methods </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+J">Jiyong Han</a>, <a href="/search/cs?searchtype=author&amp;query=Noh%2C+W">Wonjong Noh</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+Y">Yujae Song</a>, <a href="/search/cs?searchtype=author&amp;query=Jeon%2C+S">Sang-Woon Jeon</a> </p> <p class="abstract mathjax"> 
<span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.01972v1-abstract-short" style="display: inline;"> We propose a hybrid coded modulation scheme which composes of inner and outer codes. The outer-code can be any standard binary linear code with efficient soft decoding capability (e.g. low-density parity-check (LDPC) codes). The inner code is designed using a deep neural network (DNN) which takes the channel coded bits and outputs modulated symbols. For training the DNN, we propose to use a loss f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.01972v1-abstract-full').style.display = 'inline'; document.getElementById('2202.01972v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.01972v1-abstract-full" style="display: none;"> We propose a hybrid coded modulation scheme which composes of inner and outer codes. The outer-code can be any standard binary linear code with efficient soft decoding capability (e.g. low-density parity-check (LDPC) codes). The inner code is designed using a deep neural network (DNN) which takes the channel coded bits and outputs modulated symbols. For training the DNN, we propose to use a loss function that is inspired by the generalized mutual information. The resulting constellations are shown to outperform the conventional quadrature amplitude modulation (QAM) based coding scheme for modulation order 16 and 64 with 5G standard LDPC codes. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.01972v1-abstract-full').style.display = 'none'; document.getElementById('2202.01972v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.01263">arXiv:2202.01263</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.01263">pdf</a>, <a href="https://arxiv.org/format/2202.01263">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> NoisyMix: Boosting Model Robustness to Common Corruptions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Erichson%2C+N+B">N. Benjamin Erichson</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+W">Winnie Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Utrera%2C+F">Francisco Utrera</a>, <a href="/search/cs?searchtype=author&amp;query=Cao%2C+Z">Ziang Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Mahoney%2C+M+W">Michael W. 
Mahoney</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.01263v2-abstract-short" style="display: inline;"> For many real-world applications, obtaining stable and robust statistical performance is more important than simply achieving state-of-the-art predictive test accuracy, and thus robustness of neural networks is an increasingly important topic. Relatedly, data augmentation schemes have been shown to improve robustness with respect to input perturbations and domain shifts. Motivated by this, we intr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.01263v2-abstract-full').style.display = 'inline'; document.getElementById('2202.01263v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.01263v2-abstract-full" style="display: none;"> For many real-world applications, obtaining stable and robust statistical performance is more important than simply achieving state-of-the-art predictive test accuracy, and thus robustness of neural networks is an increasingly important topic. Relatedly, data augmentation schemes have been shown to improve robustness with respect to input perturbations and domain shifts. Motivated by this, we introduce NoisyMix, a novel training scheme that promotes stability as well as leverages noisy augmentations in input and feature space to improve both model robustness and in-domain accuracy. NoisyMix produces models that are consistently more robust and that provide well-calibrated estimates of class membership probabilities. We demonstrate the benefits of NoisyMix on a range of benchmark datasets, including ImageNet-C, ImageNet-R, and ImageNet-P. Moreover, we provide theory to understand implicit regularization and robustness of NoisyMix. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.01263v2-abstract-full').style.display = 'none'; document.getElementById('2202.01263v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.07275">arXiv:2110.07275</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.07275">pdf</a>, <a href="https://arxiv.org/format/2110.07275">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Order Constraints in Optimal Transport </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+F">Fabian Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Wynter%2C+L">Laura Wynter</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hong Lim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.07275v2-abstract-short" style="display: inline;"> Optimal transport is a framework for comparing measures whereby a cost is incurred for transporting one measure to another. Recent works have aimed to improve optimal transport plans through the introduction of various forms of structure. 
We introduce novel order constraints into the optimal transport formulation to allow for the incorporation of structure. We define an efficient method for obtain&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.07275v2-abstract-full').style.display = 'inline'; document.getElementById('2110.07275v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.07275v2-abstract-full" style="display: none;"> Optimal transport is a framework for comparing measures whereby a cost is incurred for transporting one measure to another. Recent works have aimed to improve optimal transport plans through the introduction of various forms of structure. We introduce novel order constraints into the optimal transport formulation to allow for the incorporation of structure. We define an efficient method for obtaining explainable solutions to the new formulation that scales far better than standard approaches. The theoretical properties of the method are provided. We demonstrate experimentally that order constraints improve explainability using the e-SNLI (Stanford Natural Language Inference) dataset that includes human-annotated rationales as well as on several image color transfer examples. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.07275v2-abstract-full').style.display = 'none'; document.getElementById('2110.07275v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in Proceedings of ICML 2022. Main Paper + Supplementary</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.02180">arXiv:2110.02180</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.02180">pdf</a>, <a href="https://arxiv.org/format/2110.02180">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Noisy Feature Mixup </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Erichson%2C+N+B">N. Benjamin Erichson</a>, <a href="/search/cs?searchtype=author&amp;query=Utrera%2C+F">Francisco Utrera</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+W">Winnie Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Mahoney%2C+M+W">Michael W. Mahoney</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.02180v2-abstract-short" style="display: inline;"> We introduce Noisy Feature Mixup (NFM), an inexpensive yet effective method for data augmentation that combines the best of interpolation based training and noise injection schemes. Rather than training with convex combinations of pairs of examples and their labels, we use noise-perturbed convex combinations of pairs of data points in both input and feature space. 
This method includes mixup and ma&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.02180v2-abstract-full').style.display = 'inline'; document.getElementById('2110.02180v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.02180v2-abstract-full" style="display: none;"> We introduce Noisy Feature Mixup (NFM), an inexpensive yet effective method for data augmentation that combines the best of interpolation based training and noise injection schemes. Rather than training with convex combinations of pairs of examples and their labels, we use noise-perturbed convex combinations of pairs of data points in both input and feature space. This method includes mixup and manifold mixup as special cases, but it has additional advantages, including better smoothing of decision boundaries and enabling improved model robustness. We provide theory to understand this as well as the implicit regularization effects of NFM. Our theory is supported by empirical results, demonstrating the advantage of NFM, as compared to mixup and manifold mixup. We show that residual networks and vision transformers trained with NFM have favorable trade-offs between predictive accuracy on clean data and robustness with respect to various types of data perturbation across a range of computer vision benchmark datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.02180v2-abstract-full').style.display = 'none'; document.getElementById('2110.02180v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">34 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ICLR 2022 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.00132">arXiv:2110.00132</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.00132">pdf</a>, <a href="https://arxiv.org/format/2110.00132">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> A Unified Discretization Approach to Compute-Forward: From Discrete to Continuous Inputs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pastore%2C+A">Adriano Pastore</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+C">Chen Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Nazer%2C+B">Bobak Nazer</a>, <a href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.00132v1-abstract-short" style="display: inline;"> Compute-forward is a coding technique that enables receiver(s) in a network to directly decode one or more linear combinations of the transmitted codewords. Initial efforts focused on Gaussian channels and derived achievable rate regions via nested lattice codes and single-user (lattice) decoding as well as sequential (lattice) decoding. 
Recently, these results have been generalized to discrete me&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.00132v1-abstract-full').style.display = 'inline'; document.getElementById('2110.00132v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.00132v1-abstract-full" style="display: none;"> Compute-forward is a coding technique that enables receiver(s) in a network to directly decode one or more linear combinations of the transmitted codewords. Initial efforts focused on Gaussian channels and derived achievable rate regions via nested lattice codes and single-user (lattice) decoding as well as sequential (lattice) decoding. Recently, these results have been generalized to discrete memoryless channels via nested linear codes and joint typicality coding, culminating in a simultaneous-decoding rate region for recovering one or more linear combinations from $K$ users. Using a discretization approach, this paper translates this result into a simultaneous-decoding rate region for a wide class of continuous memoryless channels, including the important special case of Gaussian channels. Additionally, this paper derives a single, unified expression for both discrete and continuous rate regions via an algebraic generalization of Rényi&#39;s information dimension. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.00132v1-abstract-full').style.display = 'none'; document.getElementById('2110.00132v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">86 pages, 7 figures, submitted to IEEE Transactions on Information Theory</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.09785">arXiv:2102.09785</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.09785">pdf</a>, <a href="https://arxiv.org/ps/2102.09785">ps</a>, <a href="https://arxiv.org/format/2102.09785">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning-based Beam Tracking for Millimeter-wave Communications under Mobility </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sun Hong Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+S">Sunwoo Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Shim%2C+B">Byonghyo Shim</a>, <a href="/search/cs?searchtype=author&amp;query=Choi%2C+J+W">Jun Won Choi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.09785v2-abstract-short" style="display: inline;"> In this paper, we propose a deep learning-based beam tracking method for millimeter-wave (mmWave) communications. Beam tracking is employed for transmitting the known symbols using the sounding beams and tracking time-varying channels to maintain a reliable communication link. 
When the pose of a user equipment (UE) device varies rapidly, the mmWave channels also tend to vary fast, which hinders sea&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.09785v2-abstract-full').style.display = 'inline'; document.getElementById('2102.09785v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.09785v2-abstract-full" style="display: none;"> In this paper, we propose a deep learning-based beam tracking method for millimeter-wave (mmWave) communications. Beam tracking is employed for transmitting the known symbols using the sounding beams and tracking time-varying channels to maintain a reliable communication link. When the pose of a user equipment (UE) device varies rapidly, the mmWave channels also tend to vary fast, which hinders seamless communication. Thus, models that can capture temporal behavior of mmWave channels caused by the motion of the device are required, to cope with this problem. Accordingly, we employ a deep neural network to analyze the temporal structure and patterns underlying in the time-varying channels and the signals acquired by inertial sensors. We propose a model based on long short term memory (LSTM) that predicts the distribution of the future channel behavior based on a sequence of input signals available at the UE. This channel distribution is used to 1) control the sounding beams adaptively for the future channel state and 2) update the channel estimate through the measurement update step under a sequential Bayesian estimation framework. Our experimental results demonstrate that the proposed method achieves a significant performance gain over the conventional beam tracking methods under various mobility scenarios. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.09785v2-abstract-full').style.display = 'none'; document.getElementById('2102.09785v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.09361">arXiv:2102.09361</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.09361">pdf</a>, <a href="https://arxiv.org/format/2102.09361">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Efficient Reinforcement Learning in Resource Allocation Problems Through Permutation Invariant Multi-task Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cai%2C+D">Desmond Cai</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hong Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Wynter%2C+L">Laura Wynter</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.09361v1-abstract-short" style="display: inline;"> One of the main 
challenges in real-world reinforcement learning is to learn successfully from limited training samples. We show that in certain settings, the available data can be dramatically increased through a form of multi-task learning, by exploiting an invariance property in the tasks. We provide a theoretical performance bound for the gain in sample efficiency under this setting. This motiv&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.09361v1-abstract-full').style.display = 'inline'; document.getElementById('2102.09361v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.09361v1-abstract-full" style="display: none;"> One of the main challenges in real-world reinforcement learning is to learn successfully from limited training samples. We show that in certain settings, the available data can be dramatically increased through a form of multi-task learning, by exploiting an invariance property in the tasks. We provide a theoretical performance bound for the gain in sample efficiency under this setting. This motivates a new approach to multi-task learning, which involves the design of an appropriate neural network architecture and a prioritized task-sampling strategy. We demonstrate empirically the effectiveness of the proposed approach on two real-world sequential resource allocation tasks where this invariance property occurs: financial portfolio optimization and meta federated learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.09361v1-abstract-full').style.display = 'none'; document.getElementById('2102.09361v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.04877">arXiv:2102.04877</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.04877">pdf</a>, <a href="https://arxiv.org/format/2102.04877">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> Noisy Recurrent Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Erichson%2C+N+B">N. Benjamin Erichson</a>, <a href="/search/cs?searchtype=author&amp;query=Hodgkinson%2C+L">Liam Hodgkinson</a>, <a href="/search/cs?searchtype=author&amp;query=Mahoney%2C+M+W">Michael W. Mahoney</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.04877v3-abstract-short" style="display: inline;"> We provide a general framework for studying recurrent neural networks (RNNs) trained by injecting noise into hidden states. Specifically, we consider RNNs that can be viewed as discretizations of stochastic differential equations driven by input data. 
This framework allows us to study the implicit regularization effect of general noise injection schemes by deriving an approximate explicit regulari&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.04877v3-abstract-full').style.display = 'inline'; document.getElementById('2102.04877v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.04877v3-abstract-full" style="display: none;"> We provide a general framework for studying recurrent neural networks (RNNs) trained by injecting noise into hidden states. Specifically, we consider RNNs that can be viewed as discretizations of stochastic differential equations driven by input data. This framework allows us to study the implicit regularization effect of general noise injection schemes by deriving an approximate explicit regularizer in the small noise regime. We find that, under reasonable assumptions, this implicit regularization promotes flatter minima; it biases towards models with more stable dynamics; and, in classification tasks, it favors models with larger classification margin. Sufficient conditions for global stability are obtained, highlighting the phenomenon of stochastic stabilization, where noise injection can improve stability during training. Our theory is supported by empirical results which demonstrate that the RNNs have improved robustness with respect to various input perturbations. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.04877v3-abstract-full').style.display = 'none'; document.getElementById('2102.04877v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">38 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> NeurIPS 2021 (https://proceedings.neurips.cc/paper/2021/hash/29301521774ff3cbd26652b2d5c95996-Abstract.html) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.06171">arXiv:2101.06171</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.06171">pdf</a>, <a href="https://arxiv.org/format/2101.06171">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Probabilistic Inference for Learning from Untrusted Sources </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+D+T">Duc Thien Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hoong Lim</a>, <a 
href="/search/cs?searchtype=author&amp;query=Wynter%2C+L">Laura Wynter</a>, <a href="/search/cs?searchtype=author&amp;query=Cai%2C+D">Desmond Cai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.06171v1-abstract-short" style="display: inline;"> Federated learning brings potential benefits of faster learning, better solutions, and a greater propensity to transfer when heterogeneous data from different parties increases diversity. However, because federated learning tasks tend to be large and complex, and training times non-negligible, it is important for the aggregation algorithm to be robust to non-IID data and corrupted parties. This ro&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.06171v1-abstract-full').style.display = 'inline'; document.getElementById('2101.06171v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.06171v1-abstract-full" style="display: none;"> Federated learning brings potential benefits of faster learning, better solutions, and a greater propensity to transfer when heterogeneous data from different parties increases diversity. However, because federated learning tasks tend to be large and complex, and training times non-negligible, it is important for the aggregation algorithm to be robust to non-IID data and corrupted parties. This robustness relies on the ability to identify, and appropriately weight, incompatible parties. Recent work assumes that a \textit{reference dataset} is available through which to perform the identification. We consider settings where no such reference dataset is available; rather, the quality and suitability of the parties needs to be \textit{inferred}. 
We do so by bringing ideas from crowdsourced predictions and collaborative filtering, where one must infer an unknown ground truth given proposals from participants with unknown quality. We propose novel federated learning aggregation algorithms based on Bayesian inference that adapt to the quality of the parties. Empirically, we show that the algorithms outperform standard and robust aggregation in federated learning on both synthetic and real data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.06171v1-abstract-full').style.display = 'none'; document.getElementById('2101.06171v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.06303">arXiv:2009.06303</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.06303">pdf</a>, <a href="https://arxiv.org/format/2009.06303">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Robustness and Personalization in Federated Learning: A Unified Approach via Regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kundu%2C+A">Achintya Kundu</a>, <a 
href="/search/cs?searchtype=author&amp;query=Yu%2C+P">Pengqian Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Wynter%2C+L">Laura Wynter</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hong Lim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.06303v3-abstract-short" style="display: inline;"> We present a class of methods for robust, personalized federated learning, called Fed+, that unifies many federated learning algorithms. The principal advantage of this class of methods is to better accommodate the real-world characteristics found in federated training, such as the lack of IID data across parties, the need for robustness to outliers or stragglers, and the requirement to perform we&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.06303v3-abstract-full').style.display = 'inline'; document.getElementById('2009.06303v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.06303v3-abstract-full" style="display: none;"> We present a class of methods for robust, personalized federated learning, called Fed+, that unifies many federated learning algorithms. The principal advantage of this class of methods is to better accommodate the real-world characteristics found in federated training, such as the lack of IID data across parties, the need for robustness to outliers or stragglers, and the requirement to perform well on party-specific datasets. We achieve this through a problem formulation that allows the central server to employ robust ways of aggregating the local models while keeping the structure of local computation intact. 
Without making any statistical assumption on the degree of heterogeneity of local data across parties, we provide convergence guarantees for Fed+ for convex and non-convex loss functions under different (robust) aggregation methods. The Fed+ theory is also equipped to handle heterogeneous computing environments including stragglers without additional assumptions; specifically, the convergence results cover the general setting where the number of local update steps across parties can vary. We demonstrate the benefits of Fed+ through extensive experiments across standard benchmark datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.06303v3-abstract-full').style.display = 'none'; document.getElementById('2009.06303v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by IEEE EDGE 2022 (16 pages, 4 figures, 2 tables)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.11052">arXiv:2006.11052</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.11052">pdf</a>, <a href="https://arxiv.org/ps/2006.11052">ps</a>, <a href="https://arxiv.org/format/2006.11052">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Disordered Systems and Neural Networks">cond-mat.dis-nn</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Probability">math.PR</span> </div> </div> <p class="title is-5 mathjax"> Understanding Recurrent Neural Networks Using Nonequilibrium Response Theory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Soon Hoe Lim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.11052v2-abstract-short" style="display: inline;"> Recurrent neural networks (RNNs) are brain-inspired models widely used in machine learning for analyzing sequential data. The present work is a contribution towards a deeper understanding of how RNNs process input signals using the response theory from nonequilibrium statistical mechanics. 
For a class of continuous-time stochastic RNNs (SRNNs) driven by an input signal, we derive a Volterra type s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.11052v2-abstract-full').style.display = 'inline'; document.getElementById('2006.11052v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.11052v2-abstract-full" style="display: none;"> Recurrent neural networks (RNNs) are brain-inspired models widely used in machine learning for analyzing sequential data. The present work is a contribution towards a deeper understanding of how RNNs process input signals using the response theory from nonequilibrium statistical mechanics. For a class of continuous-time stochastic RNNs (SRNNs) driven by an input signal, we derive a Volterra type series representation for their output. This representation is interpretable and disentangles the input signal from the SRNN architecture. The kernels of the series are certain recursively defined correlation functions with respect to the unperturbed dynamics that completely determine the output. Exploiting connections of this representation and its implications to rough paths theory, we identify a universal feature -- the response feature, which turns out to be the signature of tensor product of the input signal and a natural support basis. In particular, we show that SRNNs, with only the weights in the readout layer optimized and the weights in the hidden layer kept fixed and not optimized, can be viewed as kernel machines operating on a reproducing kernel Hilbert space associated with the response feature. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.11052v2-abstract-full').style.display = 'none'; document.getElementById('2006.11052v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">48 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Journal of Machine Learning Research (2021) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.00778">arXiv:2006.00778</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.00778">pdf</a>, <a href="https://arxiv.org/format/2006.00778">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Variational Bayesian Inference for Crowdsourcing Predictions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cai%2C+D">Desmond Cai</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+D+T">Duc Thien Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hong Lim</a>, <a 
href="/search/cs?searchtype=author&amp;query=Wynter%2C+L">Laura Wynter</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.00778v2-abstract-short" style="display: inline;"> Crowdsourcing has emerged as an effective means for performing a number of machine learning tasks such as annotation and labelling of images and other data sets. In most early settings of crowdsourcing, the task involved classification, that is assigning one of a discrete set of labels to each task. Recently, however, more complex tasks have been attempted including asking crowdsource workers to a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.00778v2-abstract-full').style.display = 'inline'; document.getElementById('2006.00778v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.00778v2-abstract-full" style="display: none;"> Crowdsourcing has emerged as an effective means for performing a number of machine learning tasks such as annotation and labelling of images and other data sets. In most early settings of crowdsourcing, the task involved classification, that is assigning one of a discrete set of labels to each task. Recently, however, more complex tasks have been attempted including asking crowdsource workers to assign continuous labels, or predictions. In essence, this involves the use of crowdsourcing for function estimation. We are motivated by this problem to drive applications such as collaborative prediction, that is, harnessing the wisdom of the crowd to predict quantities more accurately. To do so, we propose a Bayesian approach aimed specifically at alleviating overfitting, a typical impediment to accurate prediction models in practice. 
In particular, we develop a variational Bayesian technique for two different worker noise models - one that assumes workers&#39; noises are independent and the other that assumes workers&#39; noises have a latent low-rank structure. Our evaluations on synthetic and real-world datasets demonstrate that these Bayesian approaches perform significantly better than existing non-Bayesian approaches and are thus potentially useful for this class of crowdsourcing problems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.00778v2-abstract-full').style.display = 'none'; document.getElementById('2006.00778v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.01387">arXiv:2004.01387</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2004.01387">pdf</a>, <a href="https://arxiv.org/format/2004.01387">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A Deep Ensemble Multi-Agent Reinforcement Learning Approach for Air Traffic Control </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghosh%2C+S">Supriyo Ghosh</a>, <a href="/search/cs?searchtype=author&amp;query=Laguna%2C+S">Sean Laguna</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hong Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Wynter%2C+L">Laura Wynter</a>, <a href="/search/cs?searchtype=author&amp;query=Poonawala%2C+H">Hasan Poonawala</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.01387v1-abstract-short" style="display: inline;"> Air traffic control is an example of a highly challenging operational problem that is readily amenable to human expertise augmentation via decision support technologies. In this paper, we propose a new intelligent decision making framework that leverages multi-agent reinforcement learning (MARL) to dynamically suggest adjustments of aircraft speeds in real-time. 
The goal of the system is to enhanc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01387v1-abstract-full').style.display = 'inline'; document.getElementById('2004.01387v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.01387v1-abstract-full" style="display: none;"> Air traffic control is an example of a highly challenging operational problem that is readily amenable to human expertise augmentation via decision support technologies. In this paper, we propose a new intelligent decision making framework that leverages multi-agent reinforcement learning (MARL) to dynamically suggest adjustments of aircraft speeds in real-time. The goal of the system is to enhance the ability of an air traffic controller to provide effective guidance to aircraft to avoid air traffic congestion, near-miss situations, and to improve arrival timeliness. We develop a novel deep ensemble MARL method that can concisely capture the complexity of the air traffic control problem by learning to efficiently arbitrate between the decisions of a local kernel-based RL model and a wider-reaching deep MARL model. The proposed method is trained and evaluated on an open-source air traffic management simulator developed by Eurocontrol. Extensive empirical results on a real-world dataset including thousands of aircraft demonstrate the feasibility of using multi-agent RL for the problem of en-route air traffic control and show that our proposed deep ensemble MARL method significantly outperforms three state-of-the-art benchmark approaches. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01387v1-abstract-full').style.display = 'none'; document.getElementById('2004.01387v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.03274">arXiv:1901.03274</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1901.03274">pdf</a>, <a href="https://arxiv.org/ps/1901.03274">ps</a>, <a href="https://arxiv.org/format/1901.03274">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Towards an Algebraic Network Information Theory: Simultaneous Joint Typicality Decoding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+C">Chen Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Pastore%2C+A">Adriano Pastore</a>, <a href="/search/cs?searchtype=author&amp;query=Nazer%2C+B">Bobak Nazer</a>, <a href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1901.03274v1-abstract-short" style="display: inline;"> Consider a receiver in a multi-user network that wishes to decode several messages. 
Simultaneous joint typicality decoding is one of the most powerful techniques for determining the fundamental limits at which reliable decoding is possible. This technique has historically been used in conjunction with random i.i.d. codebooks to establish achievable rate regions for networks. Recently, it has been&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.03274v1-abstract-full').style.display = 'inline'; document.getElementById('1901.03274v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1901.03274v1-abstract-full" style="display: none;"> Consider a receiver in a multi-user network that wishes to decode several messages. Simultaneous joint typicality decoding is one of the most powerful techniques for determining the fundamental limits at which reliable decoding is possible. This technique has historically been used in conjunction with random i.i.d. codebooks to establish achievable rate regions for networks. Recently, it has been shown that, in certain scenarios, nested linear codebooks in conjunction with &#34;single-user&#34; or sequential decoding can yield better achievable rates. For instance, the compute-forward problem examines the scenario of recovering $L \le K$ linear combinations of transmitted codewords over a $K$-user multiple-access channel (MAC), and it is well established that linear codebooks can yield higher rates. Here, we develop bounds for simultaneous joint typicality decoding used in conjunction with nested linear codebooks, and apply them to obtain a larger achievable region for compute-forward over a $K$-user discrete memoryless MAC. The key technical challenge is that competing codeword tuples that are linearly dependent on the true codeword tuple introduce statistical dependencies, which requires careful partitioning of the associated error events. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.03274v1-abstract-full').style.display = 'none'; document.getElementById('1901.03274v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 5 figures, submitted to IEEE Transactions on Information Theory</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.03338">arXiv:1805.03338</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1805.03338">pdf</a>, <a href="https://arxiv.org/ps/1805.03338">ps</a>, <a href="https://arxiv.org/format/1805.03338">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> On the Optimal Achievable Rates for Linear Computation With Random Homologous Codes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sen%2C+P">Pinar Sen</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Young-Han Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1805.03338v2-abstract-short" style="display: inline;"> The problem of computing a linear combination of sources over a multiple access channel is studied. 
Inner and outer bounds on the optimal tradeoff between the communication rates are established when encoding is restricted to random ensembles of homologous codes, namely, structured nested coset codes from the same generator matrix and individual shaping functions, but when decoding is optimized wi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03338v2-abstract-full').style.display = 'inline'; document.getElementById('1805.03338v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.03338v2-abstract-full" style="display: none;"> The problem of computing a linear combination of sources over a multiple access channel is studied. Inner and outer bounds on the optimal tradeoff between the communication rates are established when encoding is restricted to random ensembles of homologous codes, namely, structured nested coset codes from the same generator matrix and individual shaping functions, but when decoding is optimized with respect to the realization of the encoders. For the special case in which the desired linear combination is &#34;matched&#34; to the structure of the multiple access channel in a natural sense, these inner and outer bounds coincide. This result indicates that most, if not all, coding schemes for computation in the literature that rely on random construction of nested coset codes cannot be improved by using more powerful decoders, such as the maximum likelihood decoder. The proof techniques are adapted to characterize the rate region for broadcast channels achieved by Marton&#39;s (random) coding scheme under maximum likelihood decoding. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03338v2-abstract-full').style.display = 'none'; document.getElementById('1805.03338v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1804.07973">arXiv:1804.07973</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1804.07973">pdf</a>, <a href="https://arxiv.org/ps/1804.07973">ps</a>, <a href="https://arxiv.org/format/1804.07973">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Efficient Beam Training and Channel Estimation for Millimeter Wave Communications Under Mobility </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sun Hong Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Bae%2C+J">Jisu Bae</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+S">Sunwoo Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Shim%2C+B">Byonghyo Shim</a>, <a href="/search/cs?searchtype=author&amp;query=Choi%2C+J+W">Jun Won Choi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1804.07973v3-abstract-short" style="display: inline;"> In this paper, we propose an efficient beam training technique for millimeter-wave (mmWave) communications. 
When some mobile users are under high mobility, the beam training should be performed frequently to ensure the accurate acquisition of the channel state information. In order to reduce the resource overhead caused by frequent beam training, we introduce a dedicated beam training strategy whi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.07973v3-abstract-full').style.display = 'inline'; document.getElementById('1804.07973v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1804.07973v3-abstract-full" style="display: none;"> In this paper, we propose an efficient beam training technique for millimeter-wave (mmWave) communications. When some mobile users are under high mobility, the beam training should be performed frequently to ensure the accurate acquisition of the channel state information. In order to reduce the resource overhead caused by frequent beam training, we introduce a dedicated beam training strategy which sends the training beams separately to a specific high mobility user (called a target user) without changing the periodicity of the conventional beam training. The dedicated beam training requires small amount of resources since the training beams can be optimized for the target user. In order to satisfy the performance requirement with low training overhead, we propose the optimal training beam selection strategy which finds the best beamforming vectors yielding the lowest channel estimation error based on the target user&#39;s probabilistic channel information. Such dedicated beam training is combined with the greedy channel estimation algorithm that accounts for sparse characteristics and temporal dynamics of the target user&#39;s channel. 
Our numerical evaluation demonstrates that the proposed scheme can maintain good channel estimation performance with significantly less training overhead compared to the conventional beam training protocols. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.07973v3-abstract-full').style.display = 'none'; document.getElementById('1804.07973v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">3p pages, This paper was submitted to IEEE Trans. Wireless Commun. on Oct. 6, 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1804.07642">arXiv:1804.07642</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1804.07642">pdf</a>, <a href="https://arxiv.org/ps/1804.07642">ps</a>, <a href="https://arxiv.org/format/1804.07642">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> On the Effects of Subpacketization in Content-Centric Mobile Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Malik%2C+A">Adeel Malik</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a 
href="/search/cs?searchtype=author&amp;query=Shin%2C+W">Won-Yong Shin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1804.07642v1-abstract-short" style="display: inline;"> A large-scale content-centric mobile ad hoc network employing subpacketization is studied in which each mobile node having finite-size cache moves according to the reshuffling mobility model and requests a content object from the library independently at random according to the Zipf popularity distribution. Instead of assuming that one content object is transferred in a single time slot, we consid&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.07642v1-abstract-full').style.display = 'inline'; document.getElementById('1804.07642v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1804.07642v1-abstract-full" style="display: none;"> A large-scale content-centric mobile ad hoc network employing subpacketization is studied in which each mobile node having finite-size cache moves according to the reshuffling mobility model and requests a content object from the library independently at random according to the Zipf popularity distribution. Instead of assuming that one content object is transferred in a single time slot, we consider a more challenging scenario where the size of each content object is considerably large and thus only a subpacket of a file can be delivered during one time slot, which is motivated by a fast mobility scenario. Under our mobility model, we consider a single-hop-based content delivery and characterize the fundamental trade-offs between throughput and delay. 
The order-optimal throughput-delay trade-off is analyzed by presenting the following two content reception strategies: the sequential reception for uncoded caching and the random reception for maximum distance separable (MDS)-coded caching. We also perform numerical evaluation to validate our analytical results. In particular, we conduct performance comparisons between the uncoded caching and the MDS-coded caching strategies by identifying the regimes in which the performance difference between the two caching strategies becomes prominent with respect to system parameters such as the Zipf exponent and the number of subpackets. In addition, we extend our study to the random walk mobility scenario and show that our main results are essentially the same as those in the reshuffling mobility model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.07642v1-abstract-full').style.display = 'none'; document.getElementById('1804.07642v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2018. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 6 figures, To appear in the IEEE Journal on Selected Areas in Communications</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1804.04818">arXiv:1804.04818</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1804.04818">pdf</a>, <a href="https://arxiv.org/ps/1804.04818">ps</a>, <a href="https://arxiv.org/format/1804.04818">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Cooperative Strategies for {UAV}-Enabled Small Cell Networks Sharing Unlicensed Spectrum </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Song%2C+Y">Yujae Song</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Jeon%2C+S">Sang-Woon Jeon</a>, <a href="/search/cs?searchtype=author&amp;query=Baek%2C+S">Seungjae Baek</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1804.04818v1-abstract-short" style="display: inline;"> In this paper, we study an aerial drone base station (DBS) assisted cellular network that consists of a single ground macro base station (MBS), multiple DBSs, and multiple ground terminals (GT). We assume that the MBS transmits to the DBSs and the GTs in the licensed band while the DBSs use a separate unlicensed band (e.g. Wi-Fi) to transmit to the GTs. 
For the utilization of the DBSs, we propose&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.04818v1-abstract-full').style.display = 'inline'; document.getElementById('1804.04818v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1804.04818v1-abstract-full" style="display: none;"> In this paper, we study an aerial drone base station (DBS) assisted cellular network that consists of a single ground macro base station (MBS), multiple DBSs, and multiple ground terminals (GT). We assume that the MBS transmits to the DBSs and the GTs in the licensed band while the DBSs use a separate unlicensed band (e.g. Wi-Fi) to transmit to the GTs. For the utilization of the DBSs, we propose a cooperative decode--forward (DF) protocol in which multiple DBSs assist the terminals simultaneously while maintaining a predetermined interference level on the coexisting unlicensed band users. For our network setup, we formulate a joint optimization problem for minimizing the aggregate gap between the target rates and the throughputs of terminals by optimizing over the 3D positions of the DBSs and the resources (power, time, bandwidth) of the network. To solve the optimization problem, we propose an efficient nested structured algorithm based on particle swarm optimization and convex optimization methods. Extensive numerical evaluations of the proposed algorithm is performed considering various aspects to demonstrate the performance of our algorithm and the gain for utilizing DBSs. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.04818v1-abstract-full').style.display = 'none'; document.getElementById('1804.04818v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">26 pages, 10 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1712.10293">arXiv:1712.10293</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1712.10293">pdf</a>, <a href="https://arxiv.org/ps/1712.10293">ps</a>, <a href="https://arxiv.org/format/1712.10293">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Compute--Forward Multiple Access (CFMA): Practical Code Design </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sula%2C+E">Erixhen Sula</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+J">Jingge Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Pastore%2C+A">Adriano Pastore</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1712.10293v1-abstract-short" style="display: inline;"> We present a practical strategy that aims to attain rate points on the 
dominant face of the multiple access channel capacity using a standard low complexity decoder. This technique is built upon recent theoretical developments of Zhu and Gastpar on compute-forward multiple access (CFMA) which achieves the capacity of the multiple access channel using a sequential decoder. We illustrate this strate&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1712.10293v1-abstract-full').style.display = 'inline'; document.getElementById('1712.10293v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1712.10293v1-abstract-full" style="display: none;"> We present a practical strategy that aims to attain rate points on the dominant face of the multiple access channel capacity using a standard low complexity decoder. This technique is built upon recent theoretical developments of Zhu and Gastpar on compute-forward multiple access (CFMA) which achieves the capacity of the multiple access channel using a sequential decoder. We illustrate this strategy with off-the-shelf LDPC codes. In the first stage of decoding, the receiver first recovers a linear combination of the transmitted codewords using the sum-product algorithm (SPA). In the second stage, by using the recovered sum-of-codewords as side information, the receiver recovers one of the two codewords using a modified SPA, ultimately recovering both codewords. The main benefit of recovering the sum-of-codewords instead of the codeword itself is that it allows to attain points on the dominant face of the multiple access channel capacity without the need of rate-splitting or time sharing while maintaining a low complexity in the order of a standard point-to-point decoder. This property is also shown to be crucial for some applications, e.g., interference channels. 
For all the simulations with single-layer binary codes, our proposed practical strategy is shown to be within \SI{1.7}{\decibel} of the theoretical limits, without explicit optimization on the off-the-self LDPC codes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1712.10293v1-abstract-full').style.display = 'none'; document.getElementById('1712.10293v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 December, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages, 14 figures, submitted to the IEEE Transactions on Wireless Communications</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.08621">arXiv:1707.08621</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1707.08621">pdf</a>, <a href="https://arxiv.org/ps/1707.08621">ps</a>, <a href="https://arxiv.org/format/1707.08621">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Communication versus Computation: Duality for multiple access channels and source coding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+J">Jingge Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="1707.08621v1-abstract-short" style="display: inline;"> Computation codes in network information theory are designed for the scenarios where the decoder is not interested in recovering the information sources themselves, but only a function thereof. Körner and Marton showed for distributed source coding that such function decoding can be achieved more efficiently than decoding the full information sources. Compute-and-forward has shown that function de&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.08621v1-abstract-full').style.display = 'inline'; document.getElementById('1707.08621v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1707.08621v1-abstract-full" style="display: none;"> Computation codes in network information theory are designed for the scenarios where the decoder is not interested in recovering the information sources themselves, but only a function thereof. Körner and Marton showed for distributed source coding that such function decoding can be achieved more efficiently than decoding the full information sources. Compute-and-forward has shown that function decoding, in combination with network coding ideas, is a useful building block for end-to-end communication. In both cases, good computation codes are the key component in the coding schemes. In this work, we expose the fact that good computation codes could undermine the capability of the codes for recovering the information sources individually, e.g., for the purpose of multiple access and distributed source coding. Particularly, we establish duality results between the codes which are good for computation and the codes which are good for multiple access or distributed compression. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.08621v1-abstract-full').style.display = 'none'; document.getElementById('1707.08621v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1702.00276">arXiv:1702.00276</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1702.00276">pdf</a>, <a href="https://arxiv.org/ps/1702.00276">ps</a>, <a href="https://arxiv.org/format/1702.00276">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> New Beam Tracking Technique for Millimeter Wave-band Communications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bae%2C+J">Jisu Bae</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sun Hong Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Yoo%2C+J+H">Jin Hyeok Yoo</a>, <a href="/search/cs?searchtype=author&amp;query=Choi%2C+J+W">Jun Won Choi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1702.00276v1-abstract-short" style="display: inline;"> In this paper, we propose an efficient beam tracking method for mobility scenario in mmWave-band communications. 
When the position of the mobile changes in mobility scenario, the base-station needs to perform beam training frequently to track the time-varying channel, thereby spending significant resources for training beams. In order to reduce the training overhead, we propose a new beam training&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.00276v1-abstract-full').style.display = 'inline'; document.getElementById('1702.00276v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1702.00276v1-abstract-full" style="display: none;"> In this paper, we propose an efficient beam tracking method for mobility scenario in mmWave-band communications. When the position of the mobile changes in mobility scenario, the base-station needs to perform beam training frequently to track the time-varying channel, thereby spending significant resources for training beams. In order to reduce the training overhead, we propose a new beam training approach called &#34;beam tracking&#34; which exploits the continuous nature of time varying angle of departure (AoD) for beam selection. We show that transmission of only two training beams is enough to track the time-varying AoD at good accuracy. We derive the optimal selection of beam pair which minimizes Cramer-Rao Lower Bound (CRLB) for AoD estimation averaged over statistical distribution of the AoD. Our numerical results demonstrate that the proposed beam tracking scheme produces better AoD estimation than the conventional beam training protocol with less training overhead. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.00276v1-abstract-full').style.display = 'none'; document.getElementById('1702.00276v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 February, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1606.09548">arXiv:1606.09548</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1606.09548">pdf</a>, <a href="https://arxiv.org/ps/1606.09548">ps</a>, <a href="https://arxiv.org/format/1606.09548">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> A Joint Typicality Approach to Algebraic Network Information Theory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+C">Chen Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Pastore%2C+A">Adriano Pastore</a>, <a href="/search/cs?searchtype=author&amp;query=Nazer%2C+B">Bobak Nazer</a>, <a href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1606.09548v1-abstract-short" style="display: inline;"> This paper presents a joint typicality framework for encoding and 
decoding nested linear codes for multi-user networks. This framework provides a new perspective on compute-forward within the context of discrete memoryless networks. In particular, it establishes an achievable rate region for computing the weighted sum of nested linear codewords over a discrete memoryless multiple-access channel (M&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1606.09548v1-abstract-full').style.display = 'inline'; document.getElementById('1606.09548v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1606.09548v1-abstract-full" style="display: none;"> This paper presents a joint typicality framework for encoding and decoding nested linear codes for multi-user networks. This framework provides a new perspective on compute-forward within the context of discrete memoryless networks. In particular, it establishes an achievable rate region for computing the weighted sum of nested linear codewords over a discrete memoryless multiple-access channel (MAC). When specialized to the Gaussian MAC, this rate region recovers and improves upon the lattice-based compute-forward rate region of Nazer and Gastpar, thus providing a unified approach for discrete memoryless and Gaussian networks. Furthermore, this framework can be used to shed light on the joint decoding rate region for compute-forward, which is considered an open problem. Specifically, this work establishes an achievable rate region for simultaneously decoding two linear combinations of nested linear codewords from K senders. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1606.09548v1-abstract-full').style.display = 'none'; document.getElementById('1606.09548v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 June, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">69 pages, 11 figures, submitted to the IEEE Transactions on Information Theory</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1605.02597">arXiv:1605.02597</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1605.02597">pdf</a>, <a href="https://arxiv.org/ps/1605.02597">ps</a>, <a href="https://arxiv.org/format/1605.02597">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JSAC.2016.2615186">10.1109/JSAC.2016.2615186 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fundamental Limits of Spectrum Sharing Full-Duplex Multicell Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chae%2C+S+H">Sung Ho Chae</a>, <a href="/search/cs?searchtype=author&amp;query=Jeon%2C+S">Sang-Woon Jeon</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1605.02597v1-abstract-short" style="display: inline;"> This paper studies the degrees of freedom of full-duplex multicell networks that share the spectrum among multiple cells in a non-orthogonal setting. In the considered network, we assume that {\em full-duplex} base stations with multiple transmit and receive antennas communicate with multiple single-antenna mobile users. By spectrum sharing among multiple cells and (simultaneously) enabling full-d&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1605.02597v1-abstract-full').style.display = 'inline'; document.getElementById('1605.02597v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1605.02597v1-abstract-full" style="display: none;"> This paper studies the degrees of freedom of full-duplex multicell networks that share the spectrum among multiple cells in a non-orthogonal setting. In the considered network, we assume that {\em full-duplex} base stations with multiple transmit and receive antennas communicate with multiple single-antenna mobile users. By spectrum sharing among multiple cells and (simultaneously) enabling full-duplex radio, the network can utilize the spectrum more flexibly, but, at the same time, the network is subject to multiple sources of interference compared to a network with separately dedicated bands for distinct cells and uplink--downlink traffic. Consequently, to take advantage of the additional freedom in utilizing the spectrum, interference management is a crucial ingredient. 
In this work, we propose a novel strategy based on interference alignment which takes into account inter-cell interference and intra-cell interference caused by spectrum sharing and full-duplex to establish a general achievability result on the sum degrees of freedom of the considered network. Paired with an upper bound on the sum degrees of freedom, which is tight under certain conditions, we demonstrate how spectrum sharing and full-duplex can significantly improve the throughput over conventional cellular networks, especially for a network with large number of users and/or cells. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1605.02597v1-abstract-full').style.display = 'none'; document.getElementById('1605.02597v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 May, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2016. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 6 figures, submitted to IEEE Journal on Selected Areas in Communications</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1604.02333">arXiv:1604.02333</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1604.02333">pdf</a>, <a href="https://arxiv.org/ps/1604.02333">ps</a>, <a href="https://arxiv.org/format/1604.02333">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ISIT.2016.7541354">10.1109/ISIT.2016.7541354 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Information Theoretic Caching: The Multi-User Case </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chien-Yi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1604.02333v1-abstract-short" style="display: inline;"> In this paper, we consider a cache aided network in which each user is assumed to have individual caches, while upon users&#39; requests, an update message is sent through a common link to all users. 
First, we formulate a general information theoretic setting that represents the database as a discrete memoryless source, and the users&#39; requests as side information that is available everywhere except at&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1604.02333v1-abstract-full').style.display = 'inline'; document.getElementById('1604.02333v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1604.02333v1-abstract-full" style="display: none;"> In this paper, we consider a cache aided network in which each user is assumed to have individual caches, while upon users&#39; requests, an update message is sent through a common link to all users. First, we formulate a general information theoretic setting that represents the database as a discrete memoryless source, and the users&#39; requests as side information that is available everywhere except at the cache encoder. The decoders&#39; objective is to recover a function of the source and the side information. By viewing cache aided networks in terms of a general distributed source coding problem and through information theoretic arguments, we present inner and outer bounds on the fundamental tradeoff of cache memory size and update rate. Then, we specialize our general inner and outer bounds to a specific model of content delivery networks: File selection networks, in which the database is a collection of independent equal-size files and each user requests one of the files independently. For file selection networks, we provide an outer bound and two inner bounds (for centralized and decentralized caching strategies). For the case when the user request information is uniformly distributed, we characterize the rate vs. cache size tradeoff to within a multiplicative gap of 4. 
By further extending our arguments to the framework of Maddah-Ali and Niesen, we also establish a new outer bound and two new inner bounds in which it is shown to recover the centralized and decentralized strategies, previously established by Maddah-Ali and Niesen. Finally, in terms of rate vs. cache size tradeoff, we improve the previous multiplicative gap of 72 to 4.7 for the average case with uniform requests. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1604.02333v1-abstract-full').style.display = 'none'; document.getElementById('1604.02333v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 April, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to IEEE Trans. Inf. Theory and presented in part at ITA 2016. 
43 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1601.05690">arXiv:1601.05690</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1601.05690">pdf</a>, <a href="https://arxiv.org/ps/1601.05690">ps</a>, <a href="https://arxiv.org/format/1601.05690">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> A New Converse Bound for Coded Caching </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chien-Yi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1601.05690v1-abstract-short" style="display: inline;"> An information-theoretic lower bound is developed for the caching system studied by Maddah-Ali and Niesen. By comparing the proposed lower bound with the decentralized coded caching scheme of Maddah-Ali and Niesen, the optimal memory--rate tradeoff is characterized to within a multiplicative gap of $4.7$ for the worst case, improving the previous analytical gap of $12$. 
Furthermore, for the case w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1601.05690v1-abstract-full').style.display = 'inline'; document.getElementById('1601.05690v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1601.05690v1-abstract-full" style="display: none;"> An information-theoretic lower bound is developed for the caching system studied by Maddah-Ali and Niesen. By comparing the proposed lower bound with the decentralized coded caching scheme of Maddah-Ali and Niesen, the optimal memory--rate tradeoff is characterized to within a multiplicative gap of $4.7$ for the worst case, improving the previous analytical gap of $12$. Furthermore, for the case when users&#39; requests follow the uniform distribution, the multiplicative gap is tightened to $4.7$, improving the previous analytical gap of $72$. As an independent result of interest, for the single-user average case in which the user requests multiple files, it is proved that caching the most requested files is optimal. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1601.05690v1-abstract-full').style.display = 'none'; document.getElementById('1601.05690v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 January, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2016. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 2 figures, to be presented at ITA 2016</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1510.00832">arXiv:1510.00832</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1510.00832">pdf</a>, <a href="https://arxiv.org/ps/1510.00832">ps</a>, <a href="https://arxiv.org/format/1510.00832">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Distributed Decode-Forward for Relay Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+K+T">Kwang Taik Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Young-Han Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1510.00832v2-abstract-short" style="display: inline;"> A new coding scheme for general N-node relay networks is presented for unicast, multicast, and broadcast. The proposed distributed decode-forward scheme combines and generalizes Marton coding for single-hop broadcast channels and the Cover-El Gamal partial decode-forward coding scheme for 3-node relay channels. 
The key idea of the scheme is to precode all the codewords of the entire network at the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1510.00832v2-abstract-full').style.display = 'inline'; document.getElementById('1510.00832v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1510.00832v2-abstract-full" style="display: none;"> A new coding scheme for general N-node relay networks is presented for unicast, multicast, and broadcast. The proposed distributed decode-forward scheme combines and generalizes Marton coding for single-hop broadcast channels and the Cover-El Gamal partial decode-forward coding scheme for 3-node relay channels. The key idea of the scheme is to precode all the codewords of the entire network at the source by multicoding over multiple blocks. This encoding step allows these codewords to carry partial information of the messages implicitly without complicated rate splitting and routing. This partial information is then recovered at the relay nodes and forwarded further. For N-node Gaussian unicast, multicast, and broadcast relay networks, the scheme achieves within 0.5N bits from the cutset bound and thus from the capacity (region), regardless of the network topology, channel gains, or power constraints. Roughly speaking, distributed decode-forward is dual to noisy network coding, which generalized compress-forward to unicast, multicast, and multiple access relay networks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1510.00832v2-abstract-full').style.display = 'none'; document.getElementById('1510.00832v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 January, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 October, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2015. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">32 pages, 5 figures, submitted to the IEEE Transactions on Information Theory</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1504.00553">arXiv:1504.00553</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1504.00553">pdf</a>, <a href="https://arxiv.org/ps/1504.00553">ps</a>, <a href="https://arxiv.org/format/1504.00553">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TIT.2016.2604851">10.1109/TIT.2016.2604851 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Information-Theoretic Caching: Sequential Coding for Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+C">Chien-Yi Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a 
href="/search/cs?searchtype=author&amp;query=Gastpar%2C+M">Michael Gastpar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1504.00553v2-abstract-short" style="display: inline;"> Under the paradigm of caching, partial data is delivered before the actual requests of users are known. In this paper, this problem is modeled as a canonical distributed source coding problem with side information, where the side information represents the users&#39; requests. For the single-user case, a single-letter characterization of the optimal rate region is established, and for several importan&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1504.00553v2-abstract-full').style.display = 'inline'; document.getElementById('1504.00553v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1504.00553v2-abstract-full" style="display: none;"> Under the paradigm of caching, partial data is delivered before the actual requests of users are known. In this paper, this problem is modeled as a canonical distributed source coding problem with side information, where the side information represents the users&#39; requests. For the single-user case, a single-letter characterization of the optimal rate region is established, and for several important special cases, closed-form solutions are given, including the scenario of uniformly distributed user requests. In this case, it is shown that the optimal caching strategy is closely related to total correlation and Wyner&#39;s common information. Using the insight gained from the single-user case, three two-user scenarios admitting single-letter characterization are considered, which draw connections to existing source coding problems in the literature: the Gray--Wyner system and distributed successive refinement. 
Finally, the model studied by Maddah-Ali and Niesen is rephrased to make a comparison with the considered information-theoretic model. Although the two caching models have a similar behavior for the single-user case, it is shown through a two-user example that the two caching models behave differently in general. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1504.00553v2-abstract-full').style.display = 'none'; document.getElementById('1504.00553v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 February, 2016; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 April, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2015. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to IEEE Trans. Inf. 
Theory and presented in part at ISIT 2015</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1501.02889">arXiv:1501.02889</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1501.02889">pdf</a>, <a href="https://arxiv.org/ps/1501.02889">ps</a>, <a href="https://arxiv.org/format/1501.02889">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ISIT.2015.7282579">10.1109/ISIT.2015.7282579 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Degrees of Freedom of Full-Duplex Multiantenna Cellular Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jeon%2C+S">Sang-Woon Jeon</a>, <a href="/search/cs?searchtype=author&amp;query=Chae%2C+S+H">Sung Ho Chae</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1501.02889v1-abstract-short" style="display: inline;"> We study the degrees of freedom (DoF) of cellular networks in which a full duplex (FD) base station (BS) equipped with multiple transmit and receive antennas communicates with multiple mobile users. We consider two different scenarios. 
In the first scenario, we study the case when half duplex (HD) users, partitioned to either the uplink (UL) set or the downlink (DL) set, simultaneously communicate&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1501.02889v1-abstract-full').style.display = 'inline'; document.getElementById('1501.02889v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1501.02889v1-abstract-full" style="display: none;"> We study the degrees of freedom (DoF) of cellular networks in which a full duplex (FD) base station (BS) equipped with multiple transmit and receive antennas communicates with multiple mobile users. We consider two different scenarios. In the first scenario, we study the case when half duplex (HD) users, partitioned to either the uplink (UL) set or the downlink (DL) set, simultaneously communicate with the FD BS. In the second scenario, we study the case when FD users simultaneously communicate UL and DL data with the FD BS. Unlike conventional HD only systems, inter-user interference (within the cell) may severely limit the DoF, and must be carefully taken into account. With the goal of providing theoretical guidelines for designing such FD systems, we completely characterize the sum DoF of each of the two different FD cellular networks by developing an achievable scheme and obtaining a matching upper bound. The key idea of the proposed scheme is to carefully allocate UL and DL information streams using interference alignment and beamforming techniques. By comparing the DoFs of the considered FD systems with those of the conventional HD systems, we establish the DoF gain by enabling FD operation in various configurations. As a consequence of the result, we show that the DoF can approach the two-fold gain over the HD systems when the number of users becomes large enough as compared to the number of antennas at the BS. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1501.02889v1-abstract-full').style.display = 'none'; document.getElementById('1501.02889v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 January, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2015. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 16 figures, a shorter version of this paper has been submitted to the IEEE International Symposium on Information Theory (ISIT) 2015</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1306.0530">arXiv:1306.0530</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1306.0530">pdf</a>, <a href="https://arxiv.org/ps/1306.0530">ps</a>, <a href="https://arxiv.org/format/1306.0530">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Hybrid Coding: An Interface for Joint Source-Channel Coding and Network Communication </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Minero%2C+P">Paolo Minero</a>, <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Young-Han Kim</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1306.0530v1-abstract-short" style="display: inline;"> A new approach to joint source-channel coding is presented in the context of 
communicating correlated sources over multiple access channels. Similar to the separation architecture, the joint source-channel coding system architecture in this approach is modular, whereby the source encoding and channel decoding operations are decoupled. However, unlike the separation architecture, the same codeword&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1306.0530v1-abstract-full').style.display = 'inline'; document.getElementById('1306.0530v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1306.0530v1-abstract-full" style="display: none;"> A new approach to joint source-channel coding is presented in the context of communicating correlated sources over multiple access channels. Similar to the separation architecture, the joint source-channel coding system architecture in this approach is modular, whereby the source encoding and channel decoding operations are decoupled. However, unlike the separation architecture, the same codeword is used for both source coding and channel coding, which allows the resulting hybrid coding scheme to achieve the performance of the best known joint source-channel coding schemes. Applications of the proposed architecture to relay communication are also discussed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1306.0530v1-abstract-full').style.display = 'none'; document.getElementById('1306.0530v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 June, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2013. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">42 pages, 10 figures. 
Submitted to IEEE Transactions on Information Theory</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1202.3741">arXiv:1202.3741</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1202.3741">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Noisy Search with Comparative Feedback </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Shiau Hong Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Auer%2C+P">Peter Auer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1202.3741v1-abstract-short" style="display: inline;"> We present theoretical results in terms of lower and upper bounds on the query complexity of noisy search with comparative feedback. In this search model, the noise in the feedback depends on the distance between query points and the search target. Consequently, the error probability in the feedback is not fixed but varies for the queries posed by the search algorithm. Our results show that a targ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1202.3741v1-abstract-full').style.display = 'inline'; document.getElementById('1202.3741v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1202.3741v1-abstract-full" style="display: none;"> We present theoretical results in terms of lower and upper bounds on the query complexity of noisy search with comparative feedback. In this search model, the noise in the feedback depends on the distance between query points and the search target. 
Consequently, the error probability in the feedback is not fixed but varies for the queries posed by the search algorithm. Our results show that a target out of n items can be found in O(log n) queries. We also show the surprising result that for k possible answers per query, the speedup is not log k (as for k-ary search) but only log log k in some cases. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1202.3741v1-abstract-full').style.display = 'none'; document.getElementById('1202.3741v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2012; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2012. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> UAI-P-2011-PG-445-452 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1002.3188">arXiv:1002.3188</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1002.3188">pdf</a>, <a href="https://arxiv.org/ps/1002.3188">ps</a>, <a href="https://arxiv.org/format/1002.3188">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Noisy Network Coding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Young-Han Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Gamal%2C+A+E">Abbas El Gamal</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+S">Sae-Young Chung</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1002.3188v2-abstract-short" style="display: inline;"> A noisy network coding scheme for sending multiple sources over a general noisy network is presented. For multi-source multicast networks, the scheme naturally extends both network coding over noiseless networks by Ahlswede, Cai, Li, and Yeung, and compress-forward coding for the relay channel by Cover and El Gamal to general discrete memoryless and Gaussian networks. The scheme also recovers as&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1002.3188v2-abstract-full').style.display = 'inline'; document.getElementById('1002.3188v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1002.3188v2-abstract-full" style="display: none;"> A noisy network coding scheme for sending multiple sources over a general noisy network is presented. For multi-source multicast networks, the scheme naturally extends both network coding over noiseless networks by Ahlswede, Cai, Li, and Yeung, and compress-forward coding for the relay channel by Cover and El Gamal to general discrete memoryless and Gaussian networks. The scheme also recovers as special cases the results on coding for wireless relay networks and deterministic networks by Avestimehr, Diggavi, and Tse, and coding for wireless erasure networks by Dana, Gowaikar, Palanki, Hassibi, and Effros. The scheme involves message repetition coding, relay signal compression, and simultaneous decoding. Unlike previous compress-forward schemes, where independent messages are sent over multiple blocks, the same message is sent multiple times using independent codebooks as in the network coding scheme for cyclic networks. 
Furthermore, the relays do not use Wyner-Ziv binning as in previous compress-forward schemes, and each decoder performs simultaneous joint typicality decoding on the received signals from all the blocks without explicitly decoding the compression indices. A consequence of this new scheme is that achievability is proved simply and more generally without resorting to time expansion to extend results for acyclic networks to networks with cycles. The noisy network coding scheme is then extended to general multi-source networks by combining it with decoding techniques for interference channels. For the Gaussian multicast network, noisy network coding improves the previously established gap to the cutset bound. We also demonstrate through two popular AWGN network examples that noisy network coding can outperform conventional compress-forward, amplify-forward, and hash-forward schemes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1002.3188v2-abstract-full').style.display = 'none'; document.getElementById('1002.3188v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 March, 2010; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 February, 2010; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2010. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">33 pages, 4 figures, submitted to IEEE Transactions on Information Theory</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/0905.3086">arXiv:0905.3086</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/0905.3086">pdf</a>, <a href="https://arxiv.org/ps/0905.3086">ps</a>, <a href="https://arxiv.org/format/0905.3086">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ISIT.2009.5205735">10.1109/ISIT.2009.5205735 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Deterministic Relay Networks with State Information </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lim%2C+S+H">Sung Hoon Lim</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+Y">Young-Han Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Chung%2C+S">Sae-Young Chung</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="0905.3086v2-abstract-short" style="display: inline;"> Motivated by fading channels and erasure channels, the problem of reliable communication over deterministic relay networks is studied, in which relay nodes receive a function of the incoming signals and a random network state. 
An achievable rate is characterized for the case in which destination nodes have full knowledge of the state information. If the relay nodes receive a linear function of t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0905.3086v2-abstract-full').style.display = 'inline'; document.getElementById('0905.3086v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="0905.3086v2-abstract-full" style="display: none;"> Motivated by fading channels and erasure channels, the problem of reliable communication over deterministic relay networks is studied, in which relay nodes receive a function of the incoming signals and a random network state. An achievable rate is characterized for the case in which destination nodes have full knowledge of the state information. If the relay nodes receive a linear function of the incoming signals and the state in a finite field, then the achievable rate is shown to be optimal, meeting the cut-set upper bound on the capacity. This result generalizes on a unified framework the work of Avestimehr, Diggavi, and Tse on the deterministic networks with state dependency, the work of Dana, Gowaikar, Palanki, Hassibi, and Effros on linear erasure networks with interference, and the work of Smith and Vishwanath on linear erasure networks with broadcast. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0905.3086v2-abstract-full').style.display = 'none'; document.getElementById('0905.3086v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 May, 2009; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 May, 2009; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2009. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, to appear in proc. IEEE ISIT, June 2009</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> 
<a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 
47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10