<!-- removed scrape artifacts ("CINXE.COM" and a stray title line) that preceded the doctype; only comments may appear before <!DOCTYPE html> -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='https://static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> #cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 55 results for author: <span class="mathjax">Tran, N H</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Tran%2C+N+H">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Tran, N H"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Tran%2C+N+H&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Tran, N H"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Tran%2C+N+H&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Tran%2C+N+H&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Tran%2C+N+H&amp;start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.05683">arXiv:2412.05683</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.05683">pdf</a>, <a href="https://arxiv.org/format/2412.05683">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Digital Libraries">cs.DL</span> </div> </div> <p class="title is-5 mathjax"> Enhancing Research Methodology and Academic Publishing: A Structured Framework for Quality and Integrity </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Piran%2C+M+J">Md. 
Jalil Piran</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.05683v2-abstract-short" style="display: inline;"> Following a brief introduction to research, research processes, research types, papers, reviews, and evaluations, this paper presents a structured framework for addressing inconsistencies in research methodology, technical writing, quality assessment, and publication standards across academic disciplines. Using a four-dimensional evaluation model that focuses on 1) technical content, 2) structural&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.05683v2-abstract-full').style.display = 'inline'; document.getElementById('2412.05683v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.05683v2-abstract-full" style="display: none;"> Following a brief introduction to research, research processes, research types, papers, reviews, and evaluations, this paper presents a structured framework for addressing inconsistencies in research methodology, technical writing, quality assessment, and publication standards across academic disciplines. Using a four-dimensional evaluation model that focuses on 1) technical content, 2) structural coherence, 3) writing precision, and 4) ethical integrity, this framework not only standardizes review and publication processes but also serves as a practical guide for authors in preparing high-quality manuscripts. Each of these four dimensions cannot be compromised for the sake of another. Following that, we discuss the components of a research paper adhering to the four-dimensional evaluation model in detail by providing guidelines and principles. 
By aligning manuscripts with journal standards, reducing review bias, and enhancing transparency, the framework contributes to more reliable and reproducible research results. Moreover, by strengthening cross-disciplinary credibility, improving publication consistency, and fostering public trust in academic literature, this initiative is expected to positively influence both research quality and scholarly publishing&#39;s reputation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.05683v2-abstract-full').style.display = 'none'; document.getElementById('2412.05683v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.02845">arXiv:2410.02845</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.02845">pdf</a>, <a href="https://arxiv.org/format/2410.02845">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Towards Layer-Wise Personalized Federated Learning: Adaptive Layer Disentanglement via Conflicting Gradients </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+D">Minh Duong Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+K">Khanh Le</a>, <a href="/search/cs?searchtype=author&amp;query=Do%2C+K">Khoi 
Do</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+D">Duc Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Trinh%2C+C">Chien Trinh</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zhaohui Yang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.02845v1-abstract-short" style="display: inline;"> In personalized Federated Learning (pFL), high data heterogeneity can cause significant gradient divergence across devices, adversely affecting the learning process. This divergence, especially when gradients from different users form an obtuse angle during aggregation, can negate progress, leading to severe weight and gradient update degradation. To address this issue, we introduce a new approach&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02845v1-abstract-full').style.display = 'inline'; document.getElementById('2410.02845v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.02845v1-abstract-full" style="display: none;"> In personalized Federated Learning (pFL), high data heterogeneity can cause significant gradient divergence across devices, adversely affecting the learning process. This divergence, especially when gradients from different users form an obtuse angle during aggregation, can negate progress, leading to severe weight and gradient update degradation. To address this issue, we introduce a new approach to pFL design, namely Federated Learning with Layer-wise Aggregation via Gradient Analysis (FedLAG), utilizing the concept of gradient conflict at the layer level. 
Specifically, when layer-wise gradients of different clients form acute angles, those gradients align in the same direction, enabling updates across different clients toward identifying client-invariant features. Conversely, when layer-wise gradient pairs create obtuse angles, the layers tend to focus on client-specific tasks. In hindsight, FedLAG assigns layers for personalization based on the extent of layer-wise gradient conflicts. Specifically, layers with gradient conflicts are excluded from the global aggregation process. The theoretical evaluation demonstrates that when integrated into other pFL baselines, FedLAG enhances pFL performance by a certain margin. Therefore, our proposed method achieves superior convergence behavior compared with other baselines. Extensive experiments show that our FedLAG outperforms several state-of-the-art methods and can be easily incorporated with many existing methods to further enhance performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02845v1-abstract-full').style.display = 'none'; document.getElementById('2410.02845v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.07421">arXiv:2407.07421</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.07421">pdf</a>, <a href="https://arxiv.org/format/2407.07421">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TNET.2024.3423780">10.1109/TNET.2024.3423780 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Federated PCA on Grassmann Manifold for IoT Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T">Tung-Anh Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+L+T">Long Tan Le</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T+D">Tuan Dung Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+W">Wei Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Seneviratne%2C+S">Suranga Seneviratne</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.07421v1-abstract-short" style="display: inline;"> With the proliferation of the Internet of Things (IoT) and the rising interconnectedness of devices, network security faces significant challenges, especially from anomalous activities. While traditional machine learning-based intrusion detection systems (ML-IDS) effectively employ supervised learning methods, they possess limitations such as the requirement for labeled data and challenges with hi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.07421v1-abstract-full').style.display = 'inline'; document.getElementById('2407.07421v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.07421v1-abstract-full" style="display: none;"> With the proliferation of the Internet of Things (IoT) and the rising interconnectedness of devices, network security faces significant challenges, especially from anomalous activities. While traditional machine learning-based intrusion detection systems (ML-IDS) effectively employ supervised learning methods, they possess limitations such as the requirement for labeled data and challenges with high dimensionality. Recent unsupervised ML-IDS approaches such as AutoEncoders and Generative Adversarial Networks (GAN) offer alternative solutions but pose challenges in deployment onto resource-constrained IoT devices and in interpretability. To address these concerns, this paper proposes a novel federated unsupervised anomaly detection framework, FedPCA, that leverages Principal Component Analysis (PCA) and the Alternating Directions Method Multipliers (ADMM) to learn common representations of distributed non-i.i.d. datasets. 
Building on the FedPCA framework, we propose two algorithms, FEDPE in Euclidean space and FEDPG on Grassmann manifolds. Our approach enables real-time threat detection and mitigation at the device level, enhancing network resilience while ensuring privacy. Moreover, the proposed algorithms are accompanied by theoretical convergence rates even under a subsampling scheme, a novel result. Experimental results on the UNSW-NB15 and TON-IoT datasets show that our proposed methods offer performance in anomaly detection comparable to nonlinear baselines, while providing significant improvements in communication and memory efficiency, underscoring their potential for securing IoT networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.07421v1-abstract-full').style.display = 'none'; document.getElementById('2407.07421v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at IEEE/ACM Transactions on Networking</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE/ACM Transactions on Networking On page(s): 1-16 Print ISSN: 1063-6692 Online ISSN: 1558-2566 Digital Object Identifier: 10.1109/TNET.2024.3423780 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.15230">arXiv:2405.15230</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.15230">pdf</a>, <a href="https://arxiv.org/format/2405.15230">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> $i$REPO: $i$mplicit Reward Pairwise Difference based Empirical Preference Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Le%2C+L+T">Long Tan Le</a>, <a href="/search/cs?searchtype=author&amp;query=Shu%2C+H">Han Shu</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T">Tung-Anh Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.15230v2-abstract-short" style="display: inline;"> While astonishingly capable, large Language Models (LLM) can sometimes produce outputs that deviate from human expectations. 
Such deviations necessitate an alignment phase to prevent disseminating untruthful, toxic, or biased information. Traditional alignment methods based on reinforcement learning often struggle with the identified instability, whereas preference optimization methods are limited&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.15230v2-abstract-full').style.display = 'inline'; document.getElementById('2405.15230v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.15230v2-abstract-full" style="display: none;"> While astonishingly capable, large Language Models (LLM) can sometimes produce outputs that deviate from human expectations. Such deviations necessitate an alignment phase to prevent disseminating untruthful, toxic, or biased information. Traditional alignment methods based on reinforcement learning often struggle with the identified instability, whereas preference optimization methods are limited by their overfitting to pre-collected hard-label datasets. In this paper, we propose a novel LLM alignment framework named $i$REPO, which utilizes implicit Reward pairwise difference regression for Empirical Preference Optimization. Particularly, $i$REPO employs self-generated datasets labeled by empirical human (or AI annotator) preference to iteratively refine the aligned policy through a novel regression-based loss function. Furthermore, we introduce an innovative algorithm backed by theoretical guarantees for achieving optimal results under ideal assumptions and providing a practical performance-gap result without such assumptions. Experimental results with Phi-2 and Mistral-7B demonstrate that $i$REPO effectively achieves self-alignment using soft-label, self-generated responses and the logit of empirical AI annotators. 
Furthermore, our approach surpasses preference optimization baselines in evaluations using the Language Model Evaluation Harness and Multi-turn benchmarks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.15230v2-abstract-full').style.display = 'none'; document.getElementById('2405.15230v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under Review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.05393">arXiv:2404.05393</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.05393">pdf</a>, <a href="https://arxiv.org/format/2404.05393">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> PAT: Pixel-wise Adaptive Training for Long-tailed Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Do%2C+K">Khoi Do</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+D">Duong Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+V+D">Viet Dung Nguyen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.05393v4-abstract-short" style="display: inline;"> Beyond class frequency, we recognize the impact of class-wise relationships among various class-specific predictions and the imbalance in label masks on long-tailed segmentation learning. To address these challenges, we propose an innovative Pixel-wise Adaptive Training (PAT) technique tailored for long-tailed segmentation. PAT has two key features: 1) class-wise gradient magnitude homogenization,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.05393v4-abstract-full').style.display = 'inline'; document.getElementById('2404.05393v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.05393v4-abstract-full" style="display: none;"> Beyond class frequency, we recognize the impact of class-wise relationships among various class-specific predictions and the imbalance in label masks on long-tailed segmentation learning. To address these challenges, we propose an innovative Pixel-wise Adaptive Training (PAT) technique tailored for long-tailed segmentation. PAT has two key features: 1) class-wise gradient magnitude homogenization, and 2) pixel-wise class-specific loss adaptation (PCLA). First, the class-wise gradient magnitude homogenization helps alleviate the imbalance among label masks by ensuring equal consideration of the class-wise impact on model updates. 
Second, PCLA tackles the detrimental impact of both rare classes within the long-tailed distribution and inaccurate predictions from previous training stages by encouraging learning classes with low prediction confidence and guarding against forgetting classes with high confidence. This combined approach fosters robust learning while preventing the model from forgetting previously learned knowledge. PAT exhibits significant performance improvements, surpassing the current state-of-the-art by 2.2% in the NyU dataset. Moreover, it enhances overall pixel-wise accuracy by 2.85% and intersection over union value by 2.07%, with a particularly notable declination of 0.39% in detecting rare classes compared to Balance Logits Variation, as demonstrated on the three popular datasets, i.e., OxfordPetIII, CityScape, and NYU. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.05393v4-abstract-full').style.display = 'none'; document.getElementById('2404.05393v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.13822">arXiv:2402.13822</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.13822">pdf</a>, <a href="https://arxiv.org/format/2402.13822">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MSTAR: Multi-Scale Backbone Architecture Search for Timeseries Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cao%2C+T+M">Tue M. Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nhat H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Pham%2C+H+H">Hieu H. Pham</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+H+T">Hung T. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+L+P">Le P. Nguyen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.13822v1-abstract-short" style="display: inline;"> Most of the previous approaches to Time Series Classification (TSC) highlight the significance of receptive fields and frequencies while overlooking the time resolution. Hence, unavoidably suffered from scalability issues as they integrated an extensive range of receptive fields into classification models. 
Other methods, while having a better adaptation for large datasets, require manual design an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13822v1-abstract-full').style.display = 'inline'; document.getElementById('2402.13822v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.13822v1-abstract-full" style="display: none;"> Most of the previous approaches to Time Series Classification (TSC) highlight the significance of receptive fields and frequencies while overlooking the time resolution. Hence, unavoidably suffered from scalability issues as they integrated an extensive range of receptive fields into classification models. Other methods, while having a better adaptation for large datasets, require manual design and yet not being able to reach the optimal architecture due to the uniqueness of each dataset. We overcome these challenges by proposing a novel multi-scale search space and a framework for Neural architecture search (NAS), which addresses both the problem of frequency and time resolution, discovering the suitable scale for a specific dataset. We further show that our model can serve as a backbone to employ a powerful Transformer module with both untrained and pre-trained weights. Our search space reaches the state-of-the-art performance on four datasets on four different domains while introducing more than ten highly fine-tuned models for each data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13822v1-abstract-full').style.display = 'none'; document.getElementById('2402.13822v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.09445">arXiv:2312.09445</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.09445">pdf</a>, <a href="https://arxiv.org/format/2312.09445">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> IncepSE: Leveraging InceptionTime&#39;s performance with Squeeze and Excitation mechanism in ECG analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cao%2C+T+M">Tue Minh Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nhat Hong Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+L+P">Le Phi Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Pham%2C+H+H">Hieu Huy Pham</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+H+T">Hung Thanh Nguyen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.09445v1-abstract-short" style="display: inline;"> Our study focuses on the potential for modifications of Inception-like architecture within the electrocardiogram (ECG) domain. To this end, we introduce IncepSE, a novel network characterized by strategic architectural incorporation that leverages the strengths of both InceptionTime and channel attention mechanisms. 
Furthermore, we propose a training setup that employs stabilization techniques tha&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.09445v1-abstract-full').style.display = 'inline'; document.getElementById('2312.09445v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.09445v1-abstract-full" style="display: none;"> Our study focuses on the potential for modifications of Inception-like architecture within the electrocardiogram (ECG) domain. To this end, we introduce IncepSE, a novel network characterized by strategic architectural incorporation that leverages the strengths of both InceptionTime and channel attention mechanisms. Furthermore, we propose a training setup that employs stabilization techniques that are aimed at tackling the formidable challenges of severe imbalance dataset PTB-XL and gradient corruption. By this means, we manage to set a new height for deep learning model in a supervised learning manner across the majority of tasks. Our model consistently surpasses InceptionTime by substantial margins compared to other state-of-the-arts in this domain, noticeably 0.013 AUROC score improvement in the &#34;all&#34; task, while also mitigating the inherent dataset fluctuations during training. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.09445v1-abstract-full').style.display = 'none'; document.getElementById('2312.09445v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.15659">arXiv:2309.15659</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.15659">pdf</a>, <a href="https://arxiv.org/format/2309.15659">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3627673.3679752">10.1145/3627673.3679752 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Federated Deep Equilibrium Learning: Harnessing Compact Global Representations to Enhance Personalization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Le%2C+L+T">Long Tan Le</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T+D">Tuan Dung Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T">Tung-Anh Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Seneviratne%2C+S">Suranga Seneviratne</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+W">Wei Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.15659v2-abstract-short" style="display: inline;"> Federated Learning (FL) has emerged as a groundbreaking distributed learning paradigm enabling clients to train a global model collaboratively without exchanging data. Despite enhancing privacy and efficiency in information retrieval and knowledge management contexts, training and deploying FL models confront significant challenges such as communication bottlenecks, data heterogeneity, and memory&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.15659v2-abstract-full').style.display = 'inline'; document.getElementById('2309.15659v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.15659v2-abstract-full" style="display: none;"> Federated Learning (FL) has emerged as a groundbreaking distributed learning paradigm enabling clients to train a global model collaboratively without exchanging data. Despite enhancing privacy and efficiency in information retrieval and knowledge management contexts, training and deploying FL models confront significant challenges such as communication bottlenecks, data heterogeneity, and memory limitations. To comprehensively address these challenges, we introduce FeDEQ, a novel FL framework that incorporates deep equilibrium learning and consensus optimization to harness compact global data representations for efficient personalization. Specifically, we design a unique model structure featuring an equilibrium layer for global representation extraction, followed by explicit layers tailored for local personalization. 
We then propose a novel FL algorithm rooted in the alternating directions method of multipliers (ADMM), which enables the joint optimization of a shared equilibrium layer and individual personalized layers across distributed datasets. Our theoretical analysis confirms that FeDEQ converges to a stationary point, achieving both compact global representations and optimal personalized parameters for each client. Extensive experiments on various benchmarks demonstrate that FeDEQ matches the performance of state-of-the-art personalized FL methods, while significantly reducing communication size by up to 4 times and memory footprint by 1.5 times during training. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.15659v2-abstract-full').style.display = 'none'; document.getElementById('2309.15659v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at CIKM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.15860">arXiv:2306.15860</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.15860">pdf</a>, <a href="https://arxiv.org/format/2306.15860">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Federated Deep Reinforcement Learning-based Bitrate Adaptation for Dynamic Adaptive Streaming over HTTP </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Vo%2C+P+L">Phuong L. Vo</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+N+T">Nghia T. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Luu%2C+L">Long Luu</a>, <a href="/search/cs?searchtype=author&amp;query=Dinh%2C+C+T">Canh T. Dinh</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+T">Tuan-Anh Le</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.15860v1-abstract-short" style="display: inline;"> In video streaming over HTTP, the bitrate adaptation selects the quality of video chunks depending on the current network condition. Some previous works have applied deep reinforcement learning (DRL) algorithms to determine the chunk&#39;s bitrate from the observed states to maximize the quality-of-experience (QoE). 
However, to build an intelligent model that can predict in various environments, such&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15860v1-abstract-full').style.display = 'inline'; document.getElementById('2306.15860v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.15860v1-abstract-full" style="display: none;"> In video streaming over HTTP, the bitrate adaptation selects the quality of video chunks depending on the current network condition. Some previous works have applied deep reinforcement learning (DRL) algorithms to determine the chunk&#39;s bitrate from the observed states to maximize the quality-of-experience (QoE). However, to build an intelligent model that can predict in various environments, such as 3G, 4G, Wifi, \textit{etc.}, the states observed from these environments must be sent to a server for training centrally. In this work, we integrate federated learning (FL) to DRL-based rate adaptation to train a model appropriate for different environments. The clients in the proposed framework train their model locally and only update the weights to the server. The simulations show that our federated DRL-based rate adaptations, called FDRLABR with different DRL algorithms, such as deep Q-learning, advantage actor-critic, and proximal policy optimization, yield better performance than the traditional bitrate adaptation methods in various environments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15860v1-abstract-full').style.display = 'none'; document.getElementById('2306.15860v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 1 column</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.11080">arXiv:2304.11080</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.11080">pdf</a>, <a href="https://arxiv.org/format/2304.11080">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Multimodal contrastive learning for diagnosing cardiovascular diseases from electrocardiography (ECG) signals and patient metadata </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cao%2C+T+M">Tue M. Cao</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nhat H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+P+L">Phi Le Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Pham%2C+H">Hieu Pham</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.11080v1-abstract-short" style="display: inline;"> This work discusses the use of contrastive learning and deep learning for diagnosing cardiovascular diseases from electrocardiography (ECG) signals. While the ECG signals usually contain 12 leads (channels), many healthcare facilities and devices lack access to all these 12 leads. This raises the problem of how to use only fewer ECG leads to produce meaningful diagnoses with high performance. 
We i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.11080v1-abstract-full').style.display = 'inline'; document.getElementById('2304.11080v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.11080v1-abstract-full" style="display: none;"> This work discusses the use of contrastive learning and deep learning for diagnosing cardiovascular diseases from electrocardiography (ECG) signals. While the ECG signals usually contain 12 leads (channels), many healthcare facilities and devices lack access to all these 12 leads. This raises the problem of how to use only fewer ECG leads to produce meaningful diagnoses with high performance. We introduce a simple experiment to test whether contrastive learning can be applied to this task. More specifically, we added the similarity between the embedding vectors when the 12 leads signal and the fewer leads ECG signal to the loss function to bring these representations closer together. Despite its simplicity, this has been shown to have improved the performance of diagnosing with all lead combinations, proving the potential of contrastive learning on this task. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.11080v1-abstract-full').style.display = 'none'; document.getElementById('2304.11080v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for presentation at the Midwest Machine Learning Symposium (MMLS 2023), Chicago, IL, USA</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.12121">arXiv:2212.12121</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2212.12121">pdf</a>, <a href="https://arxiv.org/format/2212.12121">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Federated PCA on Grassmann Manifold for Anomaly Detection in IoT Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T">Tung-Anh Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+J">Jiayu He</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+L+T">Long Tan Le</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+W">Wei Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.12121v2-abstract-short" style="display: inline;"> In the era of Internet of Things (IoT), network-wide anomaly detection is a crucial part of monitoring IoT networks due to the inherent security vulnerabilities of most IoT devices. Principal Components Analysis (PCA) has been proposed to separate network traffics into two disjoint subspaces corresponding to normal and malicious behaviors for anomaly detection. 
However, the privacy concerns and li&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.12121v2-abstract-full').style.display = 'inline'; document.getElementById('2212.12121v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.12121v2-abstract-full" style="display: none;"> In the era of Internet of Things (IoT), network-wide anomaly detection is a crucial part of monitoring IoT networks due to the inherent security vulnerabilities of most IoT devices. Principal Components Analysis (PCA) has been proposed to separate network traffics into two disjoint subspaces corresponding to normal and malicious behaviors for anomaly detection. However, the privacy concerns and limitations of devices&#39; computing resources compromise the practical effectiveness of PCA. We propose a federated PCA-based Grassmannian optimization framework that coordinates IoT devices to aggregate a joint profile of normal network behaviors for anomaly detection. First, we introduce a privacy-preserving federated PCA framework to simultaneously capture the profile of various IoT devices&#39; traffic. Then, we investigate the alternating direction method of multipliers gradient-based learning on the Grassmann manifold to guarantee fast training and the absence of detecting latency using limited computational resources. Empirical results on the NSL-KDD dataset demonstrate that our method outperforms baseline approaches. Finally, we show that the Grassmann manifold algorithm is highly adapted for IoT anomaly detection, which permits drastically reducing the analysis time of the system. To the best of our knowledge, this is the first federated PCA algorithm for anomaly detection meeting the requirements of IoT networks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.12121v2-abstract-full').style.display = 'none'; document.getElementById('2212.12121v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted at IEEE INFOCOM 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.01432">arXiv:2206.01432</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.01432">pdf</a>, <a href="https://arxiv.org/format/2206.01432">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> On the Generalization of Wasserstein Robust Federated Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T">Tung-Anh Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T+D">Tuan Dung Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Le%2C+L+T">Long Tan Le</a>, <a href="/search/cs?searchtype=author&amp;query=Dinh%2C+C+T">Canh T. Dinh</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.01432v1-abstract-short" style="display: inline;"> In federated learning, participating clients typically possess non-i.i.d. data, posing a significant challenge to generalization to unseen distributions. To address this, we propose a Wasserstein distributionally robust optimization scheme called WAFL. Leveraging its duality, we frame WAFL as an empirical surrogate risk minimization problem, and solve it using a local SGD-based algorithm with conv&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.01432v1-abstract-full').style.display = 'inline'; document.getElementById('2206.01432v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.01432v1-abstract-full" style="display: none;"> In federated learning, participating clients typically possess non-i.i.d. data, posing a significant challenge to generalization to unseen distributions. To address this, we propose a Wasserstein distributionally robust optimization scheme called WAFL. Leveraging its duality, we frame WAFL as an empirical surrogate risk minimization problem, and solve it using a local SGD-based algorithm with convergence guarantees. We show that the robustness of WAFL is more general than related approaches, and the generalization bound is robust to all adversarial distributions inside the Wasserstein ball (ambiguity set). Since the center location and radius of the Wasserstein ball can be suitably modified, WAFL shows its applicability not only in robustness but also in domain adaptation. Through empirical evaluation, we demonstrate that WAFL generalizes better than the vanilla FedAvg in non-i.i.d. settings, and is more robust than other related methods in distribution shift settings. 
Further, using benchmark datasets we show that WAFL is capable of generalizing to unseen target domains. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.01432v1-abstract-full').style.display = 'none'; document.getElementById('2206.01432v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.08605">arXiv:2201.08605</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.08605">pdf</a>, <a href="https://arxiv.org/format/2201.08605">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Seamless and Energy Efficient Maritime Coverage in Coordinated 6G Space-Air-Sea Non-Terrestrial Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hassan%2C+S+S">Sheikh Salman Hassan</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+D+H">Do Hyeon Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Tun%2C+Y+K">Yan Kyaw Tun</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.08605v1-abstract-short" style="display: inline;"> Non-terrestrial networks (NTNs), which integrate space and aerial networks with terrestrial systems, are a key area in the emerging sixth-generation (6G) wireless networks. As part of 6G, NTNs must provide pervasive connectivity to a wide range of devices, including smartphones, vehicles, sensors, robots, and maritime users. However, due to the high mobility and deployment of NTNs, managing the sp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.08605v1-abstract-full').style.display = 'inline'; document.getElementById('2201.08605v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.08605v1-abstract-full" style="display: none;"> Non-terrestrial networks (NTNs), which integrate space and aerial networks with terrestrial systems, are a key area in the emerging sixth-generation (6G) wireless networks. As part of 6G, NTNs must provide pervasive connectivity to a wide range of devices, including smartphones, vehicles, sensors, robots, and maritime users. However, due to the high mobility and deployment of NTNs, managing the space-air-sea (SAS) NTN resources, i.e., energy, power, and channel allocation, is a major challenge. The design of a SAS-NTN for energy-efficient resource allocation is investigated in this study. The goal is to maximize system energy efficiency (EE) by collaboratively optimizing user equipment (UE) association, power control, and unmanned aerial vehicle (UAV) deployment. 
Given the limited payloads of UAVs, this work focuses on minimizing the total energy cost of UAVs (trajectory and transmission) while meeting EE requirements. A mixed-integer nonlinear programming problem is proposed, followed by the development of an algorithm to decompose, and solve each problem distributedly. The binary (UE association) and continuous (power, deployment) variables are separated using the Bender decomposition (BD), and then the Dinkelbach algorithm (DA) is used to convert fractional programming into an equivalent solvable form in the subproblem. A standard optimization solver is utilized to deal with the complexity of the master problem for binary variables. The alternating direction method of multipliers (ADMM) algorithm is used to solve the subproblem for the continuous variables. Our proposed algorithm provides a suboptimal solution, and simulation results demonstrate that the proposed algorithm achieves better EE than baselines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.08605v1-abstract-full').style.display = 'none'; document.getElementById('2201.08605v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.14036">arXiv:2107.14036</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2107.14036">pdf</a>, <a href="https://arxiv.org/ps/2107.14036">ps</a>, <a href="https://arxiv.org/format/2107.14036">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Self-Driving Cars and Driver Alertness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nayak%2C+A+C">Abhaya C Nayak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.14036v1-abstract-short" style="display: inline;"> Recent years have seen growing interest in the development of self-driving vehicles that promise (or threaten) to replace human drivers with intelligent software. However, current self-driving cars still require human supervision and prompt takeover of control when necessary. Poor alertness while controlling self-driving cars could hinder the drivers&#39; ability to intervene during unpredictable situ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.14036v1-abstract-full').style.display = 'inline'; document.getElementById('2107.14036v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.14036v1-abstract-full" style="display: none;"> Recent years have seen growing interest in the development of self-driving vehicles that promise (or threaten) to replace human drivers with intelligent software. 
However, current self-driving cars still require human supervision and prompt takeover of control when necessary. Poor alertness while controlling self-driving cars could hinder the drivers&#39; ability to intervene during unpredictable situations, thus increasing the risk of avoidable accidents. In this paper we examine the key factors that contribute to drivers&#39; poor alertness, and the potential solutions that have been proposed to address them. Based on this examination we make some recommendations for various stakeholders, such as researchers, drivers, industry and policy makers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.14036v1-abstract-full').style.display = 'none'; document.getElementById('2107.14036v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages. 
Planned to be submitted to the 34th Australasian Joint Conference on Artificial Intelligence (AJCAI) 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.07148">arXiv:2102.07148</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.07148">pdf</a>, <a href="https://arxiv.org/format/2102.07148">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> A New Look and Convergence Rate of Federated Multi-Task Learning with Laplacian Regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dinh%2C+C+T">Canh T. Dinh</a>, <a href="/search/cs?searchtype=author&amp;query=Vu%2C+T+T">Tung T. Vu</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Dao%2C+M+N">Minh N. Dao</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hongyu Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.07148v5-abstract-short" style="display: inline;"> Non-Independent and Identically Distributed (non- IID) data distribution among clients is considered as the key factor that degrades the performance of federated learning (FL). Several approaches to handle non-IID data such as personalized FL and federated multi-task learning (FMTL) are of great interest to research communities. 
In this work, first, we formulate the FMTL problem using Laplacian re&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.07148v5-abstract-full').style.display = 'inline'; document.getElementById('2102.07148v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.07148v5-abstract-full" style="display: none;"> Non-Independent and Identically Distributed (non- IID) data distribution among clients is considered as the key factor that degrades the performance of federated learning (FL). Several approaches to handle non-IID data such as personalized FL and federated multi-task learning (FMTL) are of great interest to research communities. In this work, first, we formulate the FMTL problem using Laplacian regularization to explicitly leverage the relationships among the models of clients for multi-task learning. Then, we introduce a new view of the FMTL problem, which for the first time shows that the formulated FMTL problem can be used for conventional FL and personalized FL. We also propose two algorithms FedU and dFedU to solve the formulated FMTL problem in communication-centralized and decentralized schemes, respectively. Theoretically, we prove that the convergence rates of both algorithms achieve linear speedup for strongly convex and sublinear speedup of order 1/2 for nonconvex objectives. Experimentally, we show that our algorithms outperform the algorithms FedAvg, FedProx, SCAFFOLD, and AFL in FL settings, MOCHA in FMTL settings, as well as pFedMe and Per-FedAvg in personalized FL settings. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.07148v5-abstract-full').style.display = 'none'; document.getElementById('2102.07148v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.05625">arXiv:2012.05625</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.05625">pdf</a>, <a href="https://arxiv.org/format/2012.05625">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DONE: Distributed Approximate Newton-type Method for Federated Edge Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dinh%2C+C+T">Canh T. Dinh</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T+D">Tuan Dung Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+W">Wei Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Balef%2C+A+R">Amir Rezaei Balef</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+B+B">Bing B. Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zomaya%2C+A+Y">Albert Y. 
Zomaya</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.05625v4-abstract-short" style="display: inline;"> There is growing interest in applying distributed machine learning to edge computing, forming federated edge learning. Federated edge learning faces non-i.i.d. and heterogeneous data, and the communication between edge workers, possibly through distant locations and with unstable wireless networks, is more costly than their local computational overhead. In this work, we propose DONE, a distributed&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.05625v4-abstract-full').style.display = 'inline'; document.getElementById('2012.05625v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.05625v4-abstract-full" style="display: none;"> There is growing interest in applying distributed machine learning to edge computing, forming federated edge learning. Federated edge learning faces non-i.i.d. and heterogeneous data, and the communication between edge workers, possibly through distant locations and with unstable wireless networks, is more costly than their local computational overhead. In this work, we propose DONE, a distributed approximate Newton-type algorithm with fast convergence rate for communication-efficient federated edge learning. First, with strongly convex and smooth loss functions, DONE approximates the Newton direction in a distributed manner using the classical Richardson iteration on each edge worker. Second, we prove that DONE has linear-quadratic convergence and analyze its communication complexities. Finally, the experimental results with non-i.i.d. and heterogeneous data show that DONE attains a comparable performance to the Newton&#39;s method. 
Notably, DONE requires fewer communication iterations compared to distributed gradient descent and outperforms DANE and FEDL, state-of-the-art approaches, in the case of non-quadratic loss functions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.05625v4-abstract-full').style.display = 'none'; document.getElementById('2012.05625v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.00425">arXiv:2012.00425</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.00425">pdf</a>, <a href="https://arxiv.org/format/2012.00425">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JIOT.2021.3085429">10.1109/JIOT.2021.3085429 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Edge-assisted Democratized Learning Towards Federated Analytics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+N+H">Minh 
N. H. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Dang%2C+T+N">Tri Nguyen Dang</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Thar%2C+K">Kyi Thar</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.00425v3-abstract-short" style="display: inline;"> A recent take towards Federated Analytics (FA), which allows analytical insights of distributed datasets, reuses the Federated Learning (FL) infrastructure to evaluate the summary of model performances across the training devices. However, the current realization of FL adopts single server-multiple client architecture with limited scope for FA, which often results in learning models with poor gene&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.00425v3-abstract-full').style.display = 'inline'; document.getElementById('2012.00425v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.00425v3-abstract-full" style="display: none;"> A recent take towards Federated Analytics (FA), which allows analytical insights of distributed datasets, reuses the Federated Learning (FL) infrastructure to evaluate the summary of model performances across the training devices. However, the current realization of FL adopts single server-multiple client architecture with limited scope for FA, which often results in learning models with poor generalization, i.e., an ability to handle new/unseen data, for real-world applications. 
Moreover, a hierarchical FL structure with distributed computing platforms demonstrates incoherent model performances at different aggregation levels. Therefore, we need to design a more robust learning mechanism than FL that (i) unleashes a viable infrastructure for FA and (ii) trains learning models with better generalization capability. In this work, we adopt the novel democratized learning (Dem-AI) principles and designs to meet these objectives. Firstly, we show the hierarchical learning structure of the proposed edge-assisted democratized learning mechanism, namely Edge-DemLearn, as a practical framework to empower generalization capability in support of FA. Secondly, we validate Edge-DemLearn as a flexible model training mechanism to build a distributed control and aggregation methodology in regions by leveraging the distributed computing infrastructure. The distributed edge computing servers construct regional models, minimize the communication loads, and ensure distributed data analytic application&#39;s scalability. To that end, we adhere to a near-optimal two-sided many-to-one matching approach to handle the combinatorial constraints in Edge-DemLearn and solve it for fast knowledge acquisition with optimization of resource allocation and associations between multiple servers and devices. Extensive simulation results on real datasets demonstrate the effectiveness of the proposed methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.00425v3-abstract-full').style.display = 'none'; document.getElementById('2012.00425v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in IEEE Internet of Things Journal</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.12469">arXiv:2011.12469</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2011.12469">pdf</a>, <a href="https://arxiv.org/format/2011.12469">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Toward Multiple Federated Learning Services Resource Sharing in Mobile Edge Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+N+H">Minh N. H. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Tun%2C+Y+K">Yan Kyaw Tun</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.12469v1-abstract-short" style="display: inline;"> Federated Learning is a new learning scheme for collaborative training a shared prediction model while keeping data locally on participating devices. In this paper, we study a new model of multiple federated learning services at the multi-access edge computing server. 
Accordingly, the sharing of CPU resources among learning services at each mobile device for the local training process and allocati&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.12469v1-abstract-full').style.display = 'inline'; document.getElementById('2011.12469v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.12469v1-abstract-full" style="display: none;"> Federated Learning is a new learning scheme for collaboratively training a shared prediction model while keeping data locally on participating devices. In this paper, we study a new model of multiple federated learning services at the multi-access edge computing server. Accordingly, the sharing of CPU resources among learning services at each mobile device for the local training process and allocating communication resources among mobile devices for exchanging learning information must be considered. Furthermore, the convergence performance of different learning services depends on the hyper-learning rate parameter that needs to be precisely decided. Towards this end, we propose a joint resource optimization and hyper-learning rate control problem, namely MS-FEDL, regarding the energy consumption of mobile devices and overall learning time. We design a centralized algorithm based on the block coordinate descent method and a decentralized JP-miADMM algorithm for solving the MS-FEDL problem. Different from the centralized approach, the decentralized approach requires many iterations to obtain a solution, but it allows each learning service to independently manage the local resource and learning process without revealing the learning service information. Our simulation results demonstrate the convergence performance of our proposed algorithms and the superior performance of our proposed algorithms compared to the heuristic strategy. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.12469v1-abstract-full').style.display = 'none'; document.getElementById('2011.12469v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.10269">arXiv:2009.10269</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.10269">pdf</a>, <a href="https://arxiv.org/format/2009.10269">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> An Incentive Mechanism for Federated Learning in Wireless Cellular network: An Auction Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Le%2C+T+H+T">Tra Huong Thi Le</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Tun%2C+Y+K">Yan Kyaw Tun</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+N+H">Minh N. H. 
Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.10269v1-abstract-short" style="display: inline;"> Federated Learning (FL) is a distributed learning framework that can deal with the distributed issue in machine learning and still guarantee high learning performance. However, it is impractical that all users will sacrifice their resources to join the FL algorithm. This motivates us to study the incentive mechanism design for FL. In this paper, we consider a FL system that involves one base stati&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.10269v1-abstract-full').style.display = 'inline'; document.getElementById('2009.10269v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.10269v1-abstract-full" style="display: none;"> Federated Learning (FL) is a distributed learning framework that can deal with the distributed issue in machine learning and still guarantee high learning performance. However, it is impractical that all users will sacrifice their resources to join the FL algorithm. This motivates us to study the incentive mechanism design for FL. In this paper, we consider a FL system that involves one base station (BS) and multiple mobile users. The mobile users use their own data to train the local machine learning model, and then send the trained models to the BS, which generates the initial model, collects local models and constructs the global model. 
Then, we formulate the incentive mechanism between the BS and mobile users as an auction game where the BS is an auctioneer and the mobile users are the sellers. In the proposed game, each mobile user submits its bids according to the minimal energy cost that the mobile user experiences in participating in FL. To decide winners in the auction and maximize social welfare, we propose the primal-dual greedy auction mechanism. The proposed mechanism can guarantee three economic properties, namely, truthfulness, individual rationality and efficiency. Finally, numerical results are shown to demonstrate the performance effectiveness of our proposed mechanism. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.10269v1-abstract-full').style.display = 'none'; document.getElementById('2009.10269v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Paper-TW-Apr-20-0557(2020) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.08716">arXiv:2009.08716</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.08716">pdf</a>, <a href="https://arxiv.org/format/2009.08716">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TPDS.2022.3206480">10.1109/TPDS.2022.3206480 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Federated Learning with Nesterov Accelerated Gradient </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+Z">Zhengjie Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+W">Wei Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Yuan%2C+D">Dong Yuan</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Zomaya%2C+A+Y">Albert Y. 
Zomaya</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.08716v2-abstract-short" style="display: inline;"> Federated learning (FL) is a fast-developing technique that allows multiple workers to train a global model based on a distributed dataset. Conventional FL (FedAvg) employs gradient descent algorithm, which may not be efficient enough. Momentum is able to improve the situation by adding an additional momentum step to accelerate the convergence and has demonstrated its benefits in both centralized&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.08716v2-abstract-full').style.display = 'inline'; document.getElementById('2009.08716v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.08716v2-abstract-full" style="display: none;"> Federated learning (FL) is a fast-developing technique that allows multiple workers to train a global model based on a distributed dataset. Conventional FL (FedAvg) employs gradient descent algorithm, which may not be efficient enough. Momentum is able to improve the situation by adding an additional momentum step to accelerate the convergence and has demonstrated its benefits in both centralized and FL environments. It is well-known that Nesterov Accelerated Gradient (NAG) is a more advantageous form of momentum, but it is not clear how to quantify the benefits of NAG in FL so far. This motivates us to propose FedNAG, which employs NAG in each worker as well as NAG momentum and model aggregation in the aggregator. We provide a detailed convergence analysis of FedNAG and compare it with FedAvg. 
Extensive experiments based on real-world datasets and trace-driven simulation are conducted, demonstrating that FedNAG increases the learning accuracy by 3-24% and decreases the total training time by 11-70% compared with the benchmarks under a wide range of settings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.08716v2-abstract-full').style.display = 'none'; document.getElementById('2009.08716v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">published in TPDS. 
18 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.07250">arXiv:2009.07250</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.07250">pdf</a>, <a href="https://arxiv.org/format/2009.07250">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> PointIso: Point Cloud Based Deep Learning Model for Detecting Arbitrary-Precision Peptide Features in LC-MS Map through Attention Based Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zohora%2C+F+T">Fatema Tuz Zohora</a>, <a href="/search/cs?searchtype=author&amp;query=Rahman%2C+M+Z">M Ziaur Rahman</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Ngoc Hieu Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Xin%2C+L">Lei Xin</a>, <a href="/search/cs?searchtype=author&amp;query=Shan%2C+B">Baozhen Shan</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Ming Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.07250v1-abstract-short" style="display: inline;"> A promising technique of discovering disease biomarkers is to measure the relative protein abundance in multiple biofluid samples through liquid chromatography with tandem mass spectrometry (LC-MS/MS) based quantitative proteomics. The key step involves peptide feature detection in LC-MS map, along with its charge and intensity. 
Existing heuristic algorithms suffer from inaccurate parameters since&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.07250v1-abstract-full').style.display = 'inline'; document.getElementById('2009.07250v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.07250v1-abstract-full" style="display: none;"> A promising technique of discovering disease biomarkers is to measure the relative protein abundance in multiple biofluid samples through liquid chromatography with tandem mass spectrometry (LC-MS/MS) based quantitative proteomics. The key step involves peptide feature detection in LC-MS map, along with its charge and intensity. Existing heuristic algorithms suffer from inaccurate parameters since different settings of the parameters result in significantly different outcomes. Therefore, we propose PointIso, to serve the necessity of an automated system for peptide feature detection that is able to find out the proper parameters itself, and is easily adaptable to different types of datasets. It consists of an attention based scanning step for segmenting the multi-isotopic pattern of peptide features along with charge and a sequence classification step for grouping those isotopes into potential peptide features. PointIso is the first point cloud based, arbitrary-precision deep learning network to address the problem and achieves 98% detection of high quality MS/MS identifications in a benchmark dataset, which is higher than several other widely used algorithms. Besides contributing to the proteomics study, we believe our novel segmentation technique should serve the general image processing domain as well. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.07250v1-abstract-full').style.display = 'none'; document.getElementById('2009.07250v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages (main text) with 10 figures, then supplementary material of about 5 pages. preprint of journal submission</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.02031">arXiv:2009.02031</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2009.02031">pdf</a>, <a href="https://arxiv.org/ps/2009.02031">ps</a>, <a href="https://arxiv.org/format/2009.02031">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Joint Resource Allocation to Minimize Execution Time of Federated Learning in Cell-Free Massive MIMO </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Vu%2C+T+T">Tung T. Vu</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+D+T">Duy T. Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+H+Q">Hien Quoc Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Dao%2C+M+N">Minh N. Dao</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Middleton%2C+R+H">Richard H. 
Middleton</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.02031v3-abstract-short" style="display: inline;"> Due to its communication efficiency and privacy-preserving capability, federated learning (FL) has emerged as a promising framework for machine learning in 5G-and-beyond wireless networks. Of great interest is the design and optimization of new wireless network structures that support the stable and fast operation of FL. Cell-free massive multiple-input multiple-output (CFmMIMO) turns out to be a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.02031v3-abstract-full').style.display = 'inline'; document.getElementById('2009.02031v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.02031v3-abstract-full" style="display: none;"> Due to its communication efficiency and privacy-preserving capability, federated learning (FL) has emerged as a promising framework for machine learning in 5G-and-beyond wireless networks. Of great interest is the design and optimization of new wireless network structures that support the stable and fast operation of FL. Cell-free massive multiple-input multiple-output (CFmMIMO) turns out to be a suitable candidate, which allows each communication round in the iterative FL process to be stably executed within a large-scale coherence time. Aiming to reduce the total execution time of the FL process in CFmMIMO, this paper proposes choosing only a subset of available users to participate in FL. An optimal selection of users with favorable link conditions would minimize the execution time of each communication round, while limiting the total number of communication rounds required. 
Toward this end, we formulate a joint optimization problem of user selection, transmit power, and processing frequency, subject to a predefined minimum number of participating users to guarantee the quality of learning. We then develop a new algorithm that is proven to converge to the neighbourhood of the stationary points of the formulated problem. Numerical results confirm that our proposed approach significantly reduces the FL total execution time over baseline schemes. The time reduction is more pronounced when the density of access point deployments is moderately low. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.02031v3-abstract-full').style.display = 'none'; document.getElementById('2009.02031v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">accepted to appear in IEEE Internet of Things Journal, Jun. 
2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.03278">arXiv:2007.03278</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2007.03278">pdf</a>, <a href="https://arxiv.org/format/2007.03278">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Self-organizing Democratized Learning: Towards Large-scale Distributed Learning Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+N+H">Minh N. H. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Dang%2C+T+N">Tri Nguyen Dang</a>, <a href="/search/cs?searchtype=author&amp;query=Huh%2C+E">Eui-Nam Huh</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.03278v3-abstract-short" style="display: inline;"> Emerging cross-device artificial intelligence (AI) applications require a transition from conventional centralized learning systems towards large-scale distributed AI systems that can collaboratively perform complex learning tasks. 
In this regard, democratized learning (Dem-AI) lays out a holistic philosophy with underlying principles for building large-scale distributed and democratized machine l&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.03278v3-abstract-full').style.display = 'inline'; document.getElementById('2007.03278v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.03278v3-abstract-full" style="display: none;"> Emerging cross-device artificial intelligence (AI) applications require a transition from conventional centralized learning systems towards large-scale distributed AI systems that can collaboratively perform complex learning tasks. In this regard, democratized learning (Dem-AI) lays out a holistic philosophy with underlying principles for building large-scale distributed and democratized machine learning systems. The outlined principles are meant to study a generalization in distributed learning systems that goes beyond existing mechanisms such as federated learning. Moreover, such learning systems rely on hierarchical self-organization of well-connected distributed learning agents who have limited and highly personalized data and can evolve and regulate themselves based on the underlying duality of specialized and generalized processes. Inspired by Dem-AI philosophy, a novel distributed learning approach is proposed in this paper. The approach consists of a self-organizing hierarchical structuring mechanism based on agglomerative clustering, hierarchical generalization, and corresponding learning mechanism. Subsequently, hierarchical generalized learning problems in recursive forms are formulated and shown to be approximately solved using the solutions of distributed personalized learning problems and hierarchical update mechanisms. To that end, a distributed learning algorithm, namely DemLearn is proposed. 
Extensive experiments on benchmark MNIST, Fashion-MNIST, FE-MNIST, and CIFAR-10 datasets show that the proposed algorithms demonstrate better results in the generalization performance of learning models in agents compared to the conventional FL algorithms. The detailed analysis provides useful observations to further handle both the generalization and specialization performance of the learning models in Dem-AI systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.03278v3-abstract-full').style.display = 'none'; document.getElementById('2007.03278v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.08848">arXiv:2006.08848</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.08848">pdf</a>, <a href="https://arxiv.org/format/2006.08848">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Personalized Federated Learning with Moreau Envelopes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dinh%2C+C+T">Canh T. Dinh</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+T+D">Tuan Dung Nguyen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.08848v3-abstract-short" style="display: inline;"> Federated learning (FL) is a decentralized and privacy-preserving machine learning technique in which a group of clients collaborate with a server to learn a global model without sharing clients&#39; data. One challenge associated with FL is statistical diversity among clients, which restricts the global model from delivering good performance on each client&#39;s task. To address this, we propose an algor&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.08848v3-abstract-full').style.display = 'inline'; document.getElementById('2006.08848v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.08848v3-abstract-full" style="display: none;"> Federated learning (FL) is a decentralized and privacy-preserving machine learning technique in which a group of clients collaborate with a server to learn a global model without sharing clients&#39; data. One challenge associated with FL is statistical diversity among clients, which restricts the global model from delivering good performance on each client&#39;s task. To address this, we propose an algorithm for personalized FL (pFedMe) using Moreau envelopes as clients&#39; regularized loss functions, which help decouple personalized model optimization from the global model learning in a bi-level problem stylized for personalized FL. Theoretically, we show that pFedMe&#39;s convergence rate is state-of-the-art: achieving quadratic speedup for strongly convex and sublinear speedup of order 2/3 for smooth nonconvex objectives. 
Experimentally, we verify that pFedMe excels at empirical performance compared with the vanilla FedAvg and Per-FedAvg, a meta-learning based personalized FL algorithm. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.08848v3-abstract-full').style.display = 'none'; document.getElementById('2006.08848v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.00815">arXiv:2006.00815</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.00815">pdf</a>, <a href="https://arxiv.org/format/2006.00815">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Ruin Theory for Energy-Efficient Resource Allocation in UAV-assisted Cellular Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Manzoor%2C+A">Aunas Manzoor</a>, <a href="/search/cs?searchtype=author&amp;query=Kim%2C+K">Kitae Kim</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Kazmi%2C+S+M+A">S. M. Ahsan Kazmi</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.00815v1-abstract-short" style="display: inline;"> Unmanned aerial vehicles (UAVs) can provide an effective solution for improving the coverage, capacity, and the overall performance of terrestrial wireless cellular networks. In particular, UAV-assisted cellular networks can meet the stringent performance requirements of the fifth generation new radio (5G NR) applications. In this paper, the problem of energy-efficient resource allocation in UAV-a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.00815v1-abstract-full').style.display = 'inline'; document.getElementById('2006.00815v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.00815v1-abstract-full" style="display: none;"> Unmanned aerial vehicles (UAVs) can provide an effective solution for improving the coverage, capacity, and the overall performance of terrestrial wireless cellular networks. In particular, UAV-assisted cellular networks can meet the stringent performance requirements of the fifth generation new radio (5G NR) applications. In this paper, the problem of energy-efficient resource allocation in UAV-assisted cellular networks is studied under the reliability and latency constraints of 5G NR applications. The framework of ruin theory is employed to allow solar-powered UAVs to capture the dynamics of harvested and consumed energies. First, the surplus power of every UAV is modeled, and then it is used to compute the probability of ruin of the UAVs. The probability of ruin denotes the vulnerability of draining out the power of a UAV. 
Next, the probability of ruin is used for efficient user association with each UAV. Then, power allocation for 5G NR applications is performed to maximize the achievable network rate using the water-filling approach. Simulation results demonstrate that the proposed ruin-based scheme can enhance the flight duration up to 61% and the number of served users in a UAV flight by up to 58%, compared to a baseline SINR-based scheme. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.00815v1-abstract-full').style.display = 'none'; document.getElementById('2006.00815v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. 
Sheng</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W+E">Wei Emma Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Hamad%2C+S+A">Salma Abdalla Hamad</a>, <a href="/search/cs?searchtype=author&amp;query=Zaib%2C+M">Munazza Zaib</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Yao%2C+L">Lina Yao</a>, <a href="/search/cs?searchtype=author&amp;query=Khoa%2C+N+L+D">Nguyen Lu Dang Khoa</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.13245v1-abstract-short" style="display: inline;"> In recent years, the emerging topics of recommender systems that take advantage of natural language processing techniques have attracted much attention, and one of their applications is the Conversational Recommender System (CRS). Unlike traditional recommender systems with content-based and collaborative filtering approaches, CRS learns and models user&#39;s preferences through interactive dialogue c&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.13245v1-abstract-full').style.display = 'inline'; document.getElementById('2004.13245v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.13245v1-abstract-full" style="display: none;"> In recent years, the emerging topics of recommender systems that take advantage of natural language processing techniques have attracted much attention, and one of their applications is the Conversational Recommender System (CRS). Unlike traditional recommender systems with content-based and collaborative filtering approaches, CRS learns and models user&#39;s preferences through interactive dialogue conversations. 
In this work, we provide a summarization of the recent evolution of CRS, where deep learning approaches are applied to CRS and have produced fruitful results. We first analyze the research problems and present key challenges in the development of Deep Conversational Recommender Systems (DCRS), then present the current state of the field taken from the most recent researches, including the most common deep learning models that benefit DCRS. Finally, we discuss future directions for this vibrant area. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.13245v1-abstract-full').style.display = 'none'; document.getElementById('2004.13245v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 3 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.09301">arXiv:2003.09301</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.09301">pdf</a>, <a href="https://arxiv.org/format/2003.09301">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Distributed and Democratized Learning: Philosophy and Research Challenges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+N+H">Minh N. H. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Thar%2C+K">Kyi Thar</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+M">Mingzhe Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.09301v2-abstract-short" style="display: inline;"> Due to the availability of huge amounts of data and processing abilities, current artificial intelligence (AI) systems are effective in solving complex tasks. However, despite the success of AI in different areas, the problem of designing AI systems that can truly mimic human cognitive capabilities such as artificial general intelligence, remains largely open. Consequently, many emerging cross-dev&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.09301v2-abstract-full').style.display = 'inline'; document.getElementById('2003.09301v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.09301v2-abstract-full" style="display: none;"> Due to the availability of huge amounts of data and processing abilities, current artificial intelligence (AI) systems are effective in solving complex tasks. However, despite the success of AI in different areas, the problem of designing AI systems that can truly mimic human cognitive capabilities such as artificial general intelligence, remains largely open. 
Consequently, many emerging cross-device AI applications will require a transition from traditional centralized learning systems towards large-scale distributed AI systems that can collaboratively perform multiple complex learning tasks. In this paper, we propose a novel design philosophy called democratized learning (Dem-AI) whose goal is to build large-scale distributed learning systems that rely on the self-organization of distributed learning agents that are well-connected, but limited in learning capabilities. Correspondingly, inspired by the societal groups of humans, the specialized groups of learning agents in the proposed Dem-AI system are self-organized in a hierarchical structure to collectively perform learning tasks more efficiently. As such, the Dem-AI learning system can evolve and regulate itself based on the underlying duality of two processes which we call specialized and generalized processes. In this regard, we present a reference design as a guideline to realize future Dem-AI systems, inspired by various interdisciplinary fields. Accordingly, we introduce four underlying mechanisms in the design such as plasticity-stability transition mechanism, self-organizing hierarchical structuring, specialized learning, and generalization. Finally, we establish possible extensions and new challenges for the existing learning approaches to provide better scalable, flexible, and more powerful learning systems with the new setting of Dem-AI. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.09301v2-abstract-full').style.display = 'none'; document.getElementById('2003.09301v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.07651">arXiv:2003.07651</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.07651">pdf</a>, <a href="https://arxiv.org/format/2003.07651">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Intelligent Resource Slicing for eMBB and URLLC Coexistence in 5G and Beyond: A Deep Reinforcement Learning Based Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Alsenwi%2C+M">Madyan Alsenwi</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Bennis%2C+M">Mehdi Bennis</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Bairagi%2C+A+K">Anupam Kumar Bairagi</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.07651v3-abstract-short" style="display: inline;"> In this paper, we study the resource slicing problem in a dynamic multiplexing scenario of two distinct 5G services, namely Ultra-Reliable Low Latency Communications (URLLC) and enhanced Mobile BroadBand (eMBB). While eMBB services focus on high data rates, URLLC is very strict in terms of latency and reliability. In view of this, the resource slicing problem is formulated as an optimization probl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.07651v3-abstract-full').style.display = 'inline'; document.getElementById('2003.07651v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.07651v3-abstract-full" style="display: none;"> In this paper, we study the resource slicing problem in a dynamic multiplexing scenario of two distinct 5G services, namely Ultra-Reliable Low Latency Communications (URLLC) and enhanced Mobile BroadBand (eMBB). While eMBB services focus on high data rates, URLLC is very strict in terms of latency and reliability. In view of this, the resource slicing problem is formulated as an optimization problem that aims at maximizing the eMBB data rate subject to a URLLC reliability constraint, while considering the variance of the eMBB data rate to reduce the impact of immediately scheduled URLLC traffic on the eMBB reliability. 
To solve the formulated problem, an optimization-aided Deep Reinforcement Learning (DRL) based framework is proposed, including: 1) eMBB resource allocation phase, and 2) URLLC scheduling phase. In the first phase, the optimization problem is decomposed into three subproblems and then each subproblem is transformed into a convex form to obtain an approximate resource allocation solution. In the second phase, a DRL-based algorithm is proposed to intelligently distribute the incoming URLLC traffic among eMBB users. Simulation results show that our proposed approach can satisfy the stringent URLLC reliability while keeping the eMBB reliability higher than 90%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.07651v3-abstract-full').style.display = 'none'; document.getElementById('2003.07651v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work was submitted to the IEEE Transactions on Wireless Communications</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.04816">arXiv:2003.04816</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.04816">pdf</a>, <a href="https://arxiv.org/format/2003.04816">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Data Freshness and Energy-Efficient UAV Navigation Optimization: A Deep Reinforcement Learning Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Abedin%2C+S+F">Sarder Fakhrul Abedin</a>, <a href="/search/cs?searchtype=author&amp;query=Munir%2C+M+S">Md. Shirajum Munir</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.04816v1-abstract-short" style="display: inline;"> In this paper, we design a navigation policy for multiple unmanned aerial vehicles (UAVs) where mobile base stations (BSs) are deployed to improve the data freshness and connectivity to the Internet of Things (IoT) devices. First, we formulate an energy-efficient trajectory optimization problem in which the objective is to maximize the energy efficiency by optimizing the UAV-BS trajectory policy.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.04816v1-abstract-full').style.display = 'inline'; document.getElementById('2003.04816v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.04816v1-abstract-full" style="display: none;"> In this paper, we design a navigation policy for multiple unmanned aerial vehicles (UAVs) where mobile base stations (BSs) are deployed to improve the data freshness and connectivity to the Internet of Things (IoT) devices. First, we formulate an energy-efficient trajectory optimization problem in which the objective is to maximize the energy efficiency by optimizing the UAV-BS trajectory policy. We also incorporate different contextual information such as energy and age of information (AoI) constraints to ensure the data freshness at the ground BS. Second, we propose an agile deep reinforcement learning with experience replay model to solve the formulated problem concerning the contextual constraints for the UAV-BS navigation. 
Moreover, the proposed approach is well-suited for solving the problem, since the state space of the problem is extremely large and finding the best trajectory policy with useful contextual features is too complex for the UAV-BSs. By applying the proposed trained model, an effective real-time trajectory policy for the UAV-BSs captures the observable network states over time. Finally, the simulation results illustrate that the proposed approach is 3.6% and 3.13% more energy efficient than those of the greedy and baseline deep Q Network (DQN) approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.04816v1-abstract-full').style.display = 'none'; document.getElementById('2003.04816v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to IEEE Transactions on Intelligent Transportation Systems, Special Issue on Unmanned Aircraft System Traffic Management</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.04551">arXiv:2003.04551</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.04551">pdf</a>, <a href="https://arxiv.org/format/2003.04551">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Coexistence Mechanism between eMBB and uRLLC in 5G Wireless Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bairagi%2C+A+K">Anupam Kumar Bairagi</a>, <a href="/search/cs?searchtype=author&amp;query=Munir%2C+M+S">Md. Shirajum Munir</a>, <a href="/search/cs?searchtype=author&amp;query=Alsenwi%2C+M">Madyan Alsenwi</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Alshamrani%2C+S+S">Sultan S Alshamrani</a>, <a href="/search/cs?searchtype=author&amp;query=Masud%2C+M">Mehedi Masud</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.04551v1-abstract-short" style="display: inline;"> uRLLC and eMBB are two influential services of the emerging 5G cellular network. Latency and reliability are major concerns for uRLLC applications, whereas eMBB services claim for the maximum data rates. Owing to the trade-off among latency, reliability and spectral efficiency, sharing of radio resources between eMBB and uRLLC services, heads to a challenging scheduling dilemma. In this paper, we&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.04551v1-abstract-full').style.display = 'inline'; document.getElementById('2003.04551v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.04551v1-abstract-full" style="display: none;"> uRLLC and eMBB are two influential services of the emerging 5G cellular network. Latency and reliability are major concerns for uRLLC applications, whereas eMBB services claim for the maximum data rates. Owing to the trade-off among latency, reliability and spectral efficiency, sharing of radio resources between eMBB and uRLLC services, heads to a challenging scheduling dilemma. In this paper, we study the co-scheduling problem of eMBB and uRLLC traffic based upon the puncturing technique. Precisely, we formulate an optimization problem aiming to maximize the MEAR of eMBB UEs while fulfilling the provisions of the uRLLC traffic. 
We decompose the original problem into two sub-problems, namely scheduling problem of eMBB UEs and uRLLC UEs while prevailing objective unchanged. Radio resources are scheduled among the eMBB UEs on a time slot basis, whereas it is handled for uRLLC UEs on a mini-slot basis. Moreover, for resolving the scheduling issue of eMBB UEs, we use PSUM based algorithm, whereas the optimal TM is adopted for solving the same problem of uRLLC UEs. Furthermore, a heuristic algorithm is also provided to solve the first sub-problem with lower complexity. Finally, the significance of the proposed approach over other baseline approaches is established through numerical analysis in terms of the MEAR and fairness scores of the eMBB UEs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.04551v1-abstract-full').style.display = 'none'; document.getElementById('2003.04551v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages, 11 figures, IEEE Transactions on Communications</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.02157">arXiv:2003.02157</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.02157">pdf</a>, <a href="https://arxiv.org/format/2003.02157">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TNSM.2021.3049381">10.1109/TNSM.2021.3049381 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Risk-Aware Energy Scheduling for Edge Computing with Microgrid: A Multi-Agent Deep Reinforcement Learning Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Munir%2C+M+S">Md. Shirajum Munir</a>, <a href="/search/cs?searchtype=author&amp;query=Abedin%2C+S+F">Sarder Fakhrul Abedin</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Huh%2C+E">Eui-Nam Huh</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.02157v3-abstract-short" style="display: inline;"> In recent years, multi-access edge computing (MEC) is a key enabler for handling the massive expansion of Internet of Things (IoT) applications and services. However, energy consumption of a MEC network depends on volatile tasks that induce risk for energy demand estimations. As an energy supplier, a microgrid can facilitate seamless energy supply. However, the risk associated with energy supply&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.02157v3-abstract-full').style.display = 'inline'; document.getElementById('2003.02157v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.02157v3-abstract-full" style="display: none;"> In recent years, multi-access edge computing (MEC) is a key enabler for handling the massive expansion of Internet of Things (IoT) applications and services. However, energy consumption of a MEC network depends on volatile tasks that induce risk for energy demand estimations. As an energy supplier, a microgrid can facilitate seamless energy supply. However, the risk associated with energy supply is also increased due to unpredictable energy generation from renewable and non-renewable sources. Especially, the risk of energy shortfall is involved with uncertainties in both energy consumption and generation. In this paper, we study a risk-aware energy scheduling problem for a microgrid-powered MEC network. 
First, we formulate an optimization problem considering the conditional value-at-risk (CVaR) measurement for both energy consumption and generation, where the objective is to minimize the expected residual of scheduled energy for the MEC networks and we show this problem is an NP-hard problem. Second, we analyze our formulated problem using a multi-agent stochastic game that ensures the joint policy Nash equilibrium, and show the convergence of the proposed model. Third, we derive the solution by applying a multi-agent deep reinforcement learning (MADRL)-based asynchronous advantage actor-critic (A3C) algorithm with shared neural networks. This method mitigates the curse of dimensionality of the state space and chooses the best policy among the agents for the proposed problem. Finally, the experimental results establish a significant performance gain by considering CVaR for high accuracy energy scheduling of the proposed model than both the single and random agent models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.02157v3-abstract-full').style.display = 'none'; document.getElementById('2003.02157v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted article by IEEE Transactions on Network and Service Management, DOI: 10.1109/TNSM.2021.3049381</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.08567">arXiv:2002.08567</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2002.08567">pdf</a>, <a href="https://arxiv.org/format/2002.08567">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TNSM.2021.3057960">10.1109/TNSM.2021.3057960 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Multi-Agent Meta-Reinforcement Learning for Self-Powered and Sustainable Edge Computing Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Munir%2C+M+S">Md. Shirajum Munir</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.08567v3-abstract-short" style="display: inline;"> The stringent requirements of mobile edge computing (MEC) applications and functions fathom the high capacity and dense deployment of MEC hosts to the upcoming wireless networks. However, operating such high capacity MEC hosts can significantly increase energy consumption. Thus, a base station (BS) unit can act as a self-powered BS. In this paper, an effective energy dispatch mechanism for self-po&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.08567v3-abstract-full').style.display = 'inline'; document.getElementById('2002.08567v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.08567v3-abstract-full" style="display: none;"> The stringent requirements of mobile edge computing (MEC) applications and functions fathom the high capacity and dense deployment of MEC hosts to the upcoming wireless networks. However, operating such high capacity MEC hosts can significantly increase energy consumption. Thus, a base station (BS) unit can act as a self-powered BS. In this paper, an effective energy dispatch mechanism for self-powered wireless networks with edge computing capabilities is studied. First, a two-stage linear stochastic programming problem is formulated with the goal of minimizing the total energy consumption cost of the system while fulfilling the energy demand. Second, a semi-distributed data-driven solution is proposed by developing a novel multi-agent meta-reinforcement learning (MAMRL) framework to solve the formulated problem. 
In particular, each BS plays the role of a local agent that explores a Markovian behavior for both energy consumption and generation while each BS transfers time-varying features to a meta-agent. Sequentially, the meta-agent optimizes (i.e., exploits) the energy dispatch decision by accepting only the observations from each local agent with its own state information. Meanwhile, each BS agent estimates its own energy dispatch policy by applying the learned parameters from meta-agent. Finally, the proposed MAMRL framework is benchmarked by analyzing deterministic, asymmetric, and stochastic environments in terms of non-renewable energy usages, energy cost, and accuracy. Experimental results show that the proposed MAMRL model can reduce up to 11% non-renewable energy usage and by 22.4% the energy cost (with 95.8% prediction accuracy), compared to other baseline methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.08567v3-abstract-full').style.display = 'none'; document.getElementById('2002.08567v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted article by IEEE Transactions on Network and Service Management, DOI: 10.1109/TNSM.2021.3057960. 
Copyright 2021 IEEE</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.05642">arXiv:1911.05642</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.05642">pdf</a>, <a href="https://arxiv.org/format/1911.05642">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Federated Learning for Edge Networks: Resource Optimization and Incentive Mechanism </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Khan%2C+L+U">Latif U. Khan</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+N+H">Minh N. H. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.05642v3-abstract-short" style="display: inline;"> Recent years have witnessed a rapid proliferation of smart Internet of Things (IoT) devices. IoT devices with intelligence require the use of effective machine learning paradigms. Federated learning can be a promising solution for enabling IoT-based smart applications. In this paper, we present the primary design aspects for enabling federated learning at network edge. 
We model the incentive-based&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.05642v3-abstract-full').style.display = 'inline'; document.getElementById('1911.05642v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.05642v3-abstract-full" style="display: none;"> Recent years have witnessed a rapid proliferation of smart Internet of Things (IoT) devices. IoT devices with intelligence require the use of effective machine learning paradigms. Federated learning can be a promising solution for enabling IoT-based smart applications. In this paper, we present the primary design aspects for enabling federated learning at network edge. We model the incentive-based interaction between a global server and participating devices for federated learning via a Stackelberg game to motivate the participation of the devices in the federated learning process. We present several open research challenges with their possible solutions. Finally, we provide an outlook on future research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.05642v3-abstract-full').style.display = 'none'; document.getElementById('1911.05642v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The first two authors contributed equally. 
This article has been accepted for publication in IEEE Communications Magazine</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.01046">arXiv:1911.01046</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.01046">pdf</a>, <a href="https://arxiv.org/ps/1911.01046">ps</a>, <a href="https://arxiv.org/format/1911.01046">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TWC.2020.2971981">10.1109/TWC.2020.2971981 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A Crowdsourcing Framework for On-Device Federated Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Bennis%2C+M">Mehdi Bennis</a>, <a href="/search/cs?searchtype=author&amp;query=Tun%2C+Y+K">Yan Kyaw Tun</a>, <a href="/search/cs?searchtype=author&amp;query=Manzoor%2C+A">Aunas Manzoor</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.01046v2-abstract-short" style="display: inline;"> Federated learning (FL) rests on the notion of training a global model in a decentralized manner. Under this setting, mobile devices perform computations on their local data before uploading the required updates to improve the global model. However, when the participating clients implement an uncoordinated computation strategy, the difficulty is to handle the communication efficiency (i.e., the nu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.01046v2-abstract-full').style.display = 'inline'; document.getElementById('1911.01046v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.01046v2-abstract-full" style="display: none;"> Federated learning (FL) rests on the notion of training a global model in a decentralized manner. Under this setting, mobile devices perform computations on their local data before uploading the required updates to improve the global model. However, when the participating clients implement an uncoordinated computation strategy, the difficulty is to handle the communication efficiency (i.e., the number of communications per iteration) while exchanging the model parameters during aggregation. Therefore, a key challenge in FL is how users participate to build a high-quality global model with communication efficiency. 
We tackle this issue by formulating a utility maximization problem, and propose a novel crowdsourcing framework to leverage FL that considers the communication efficiency during parameters exchange. First, we show an incentive-based interaction between the crowdsourcing platform and the participating client&#39;s independent strategies for training a global learning model, where each side maximizes its own benefit. We formulate a two-stage Stackelberg game to analyze such scenario and find the game&#39;s equilibria. Second, we formalize an admission control scheme for participating clients to ensure a level of local accuracy. Simulated results demonstrate the efficacy of our proposed solution with up to 22% gain in the offered reward. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.01046v2-abstract-full').style.display = 'none'; document.getElementById('1911.01046v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in IEEE Transactions on Wireless Communications</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1910.13067">arXiv:1910.13067</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1910.13067">pdf</a>, <a href="https://arxiv.org/format/1910.13067">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TNET.2020.3035770">10.1109/TNET.2020.3035770 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Federated Learning over Wireless Networks: Convergence Analysis and Resource Allocation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dinh%2C+C+T">Canh T. Dinh</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+M+N+H">Minh N. H. 
Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a>, <a href="/search/cs?searchtype=author&amp;query=Bao%2C+W">Wei Bao</a>, <a href="/search/cs?searchtype=author&amp;query=Zomaya%2C+A+Y">Albert Y. Zomaya</a>, <a href="/search/cs?searchtype=author&amp;query=Gramoli%2C+V">Vincent Gramoli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1910.13067v4-abstract-short" style="display: inline;"> There is an increasing interest in a fast-growing machine learning technique called Federated Learning, in which the model training is distributed over mobile user equipments (UEs), exploiting UEs&#39; local computation and training data. Despite its advantages in data privacy-preserving, Federated Learning (FL) still has challenges in heterogeneity across UEs&#39; data and physical resources. We first pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.13067v4-abstract-full').style.display = 'inline'; document.getElementById('1910.13067v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1910.13067v4-abstract-full" style="display: none;"> There is an increasing interest in a fast-growing machine learning technique called Federated Learning, in which the model training is distributed over mobile user equipments (UEs), exploiting UEs&#39; local computation and training data. Despite its advantages in data privacy-preserving, Federated Learning (FL) still has challenges in heterogeneity across UEs&#39; data and physical resources. We first propose a FL algorithm which can handle the heterogeneous UEs&#39; data challenge without further assumptions except strongly convex and smooth loss functions. 
We provide the convergence rate characterizing the trade-off between local computation rounds of UE to update its local model and global communication rounds to update the FL global model. We then employ the proposed FL algorithm in wireless networks as a resource allocation optimization problem that captures the trade-off between the FL convergence wall clock time and energy consumption of UEs with heterogeneous computing and power resources. Even though the wireless resource allocation problem of FL is non-convex, we exploit this problem&#39;s structure to decompose it into three sub-problems and analyze their closed-form solutions as well as insights to problem design. Finally, we illustrate the theoretical analysis for the new algorithm with Tensorflow experiments and extensive numerical results for the wireless resource allocation sub-problems. The experiment results not only verify the theoretical convergence but also show that our proposed algorithm outperforms the vanilla FedAvg algorithm in terms of convergence rate and testing accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.13067v4-abstract-full').style.display = 'none'; document.getElementById('1910.13067v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2019. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.12567">arXiv:1909.12567</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1909.12567">pdf</a>, <a href="https://arxiv.org/ps/1909.12567">ps</a>, <a href="https://arxiv.org/format/1909.12567">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Cell-Free Massive MIMO for Wireless Federated Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Vu%2C+T+T">Tung T. Vu</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+D+T">Duy T. Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+H+Q">Hien Quoc Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Dao%2C+M+N">Minh N. Dao</a>, <a href="/search/cs?searchtype=author&amp;query=Middleton%2C+R+H">Richard H. Middleton</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.12567v5-abstract-short" style="display: inline;"> This paper proposes a novel scheme for cell-free massive multiple-input multiple-output (CFmMIMO) networks to support any federated learning (FL) framework. This scheme allows each instead of all the iterations of the FL framework to happen in a large-scale coherence time to guarantee a stable operation of an FL process. 
To show how to optimize the FL performance using this proposed scheme, we con&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.12567v5-abstract-full').style.display = 'inline'; document.getElementById('1909.12567v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.12567v5-abstract-full" style="display: none;"> This paper proposes a novel scheme for cell-free massive multiple-input multiple-output (CFmMIMO) networks to support any federated learning (FL) framework. This scheme allows each instead of all the iterations of the FL framework to happen in a large-scale coherence time to guarantee a stable operation of an FL process. To show how to optimize the FL performance using this proposed scheme, we consider an existing FL framework as an example and target FL training time minimization for this framework. An optimization problem is then formulated to jointly optimize the local accuracy, transmit power, data rate, and users&#39; processing frequency. This mixed-timescale stochastic nonconvex problem captures the complex interactions among the training time, and transmission and computation of training updates of one FL process. By employing the online successive convex approximation approach, we develop a new algorithm to solve the formulated problem with proven convergence to the neighbourhood of its stationary points. Our numerical results confirm that the presented joint design reduces the training time by up to $55\%$ over baseline approaches. They also show that CFmMIMO here requires the lowest training time for FL processes compared with cell-free time-division multiple access massive MIMO and collocated massive MIMO. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.12567v5-abstract-full').style.display = 'none'; document.getElementById('1909.12567v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE Transactions on Wireless Communications, accepted for publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.08747">arXiv:1909.08747</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1909.08747">pdf</a>, <a href="https://arxiv.org/format/1909.08747">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Edge-Computing-Enabled Smart Cities: A Comprehensive Survey </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Khan%2C+L+U">Latif U. Khan</a>, <a href="/search/cs?searchtype=author&amp;query=Yaqoob%2C+I">Ibrar Yaqoob</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Kazmi%2C+S+M+A">S. M. 
Ahsan Kazmi</a>, <a href="/search/cs?searchtype=author&amp;query=Dang%2C+T+N">Tri Nguyen Dang</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.08747v2-abstract-short" style="display: inline;"> Recent years have disclosed a remarkable proliferation of compute-intensive applications in smart cities. Such applications continuously generate enormous amounts of data which demand strict latency-aware computational processing capabilities. Although edge computing is an appealing technology to compensate for stringent latency related issues, its deployment engenders new challenges. In this surv&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.08747v2-abstract-full').style.display = 'inline'; document.getElementById('1909.08747v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.08747v2-abstract-full" style="display: none;"> Recent years have disclosed a remarkable proliferation of compute-intensive applications in smart cities. Such applications continuously generate enormous amounts of data which demand strict latency-aware computational processing capabilities. Although edge computing is an appealing technology to compensate for stringent latency related issues, its deployment engenders new challenges. In this survey, we highlight the role of edge computing in realizing the vision of smart cities. First, we analyze the evolution of edge computing paradigms. Subsequently, we critically review the state-of-the-art literature focusing on edge computing applications in smart cities. Later, we categorize and classify the literature by devising a comprehensive and meticulous taxonomy. 
Furthermore, we identify and discuss key requirements, and enumerate recently reported synergies of edge computing enabled smart cities. Finally, several indispensable open challenges along with their causes and guidelines are discussed, serving as future research directions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.08747v2-abstract-full').style.display = 'none'; document.getElementById('1909.08747v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.10229">arXiv:1908.10229</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1908.10229">pdf</a>, <a href="https://arxiv.org/format/1908.10229">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> A Security-Aware Access Model for Data-Driven EHR System </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Ngoc Hong Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen-Ngoc%2C+T">Thien-An Nguyen-Ngoc</a>, <a href="/search/cs?searchtype=author&amp;query=Le-Khac%2C+N">Nhien-An Le-Khac</a>, <a href="/search/cs?searchtype=author&amp;query=Kechadi%2C+M">M-Tahar Kechadi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.10229v1-abstract-short" style="display: inline;"> Digital healthcare systems are very popular lately, as they provide a variety of helpful means to monitor people&#39;s health state as well as to protect people against an unexpected health situation. These systems contain a huge amount of personal information in a form of electronic health records that are not allowed to be disclosed to unauthorized users. Hence, health data and information need to b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.10229v1-abstract-full').style.display = 'inline'; document.getElementById('1908.10229v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.10229v1-abstract-full" style="display: none;"> Digital healthcare systems are very popular lately, as they provide a variety of helpful means to monitor people&#39;s health state as well as to protect people against an unexpected health situation. These systems contain a huge amount of personal information in a form of electronic health records that are not allowed to be disclosed to unauthorized users. Hence, health data and information need to be protected against attacks and thefts. In this paper, we propose a secure distributed architecture for healthcare data storage and analysis. It uses a novel security model to rigorously control permissions of accessing sensitive data in the system, as well as to protect the transmitted data between distributed system servers and nodes. The model also satisfies the NIST security requirements. Thorough experimental results show that the model is very promising. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.10229v1-abstract-full').style.display = 'none'; document.getElementById('1908.10229v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">13 pages, 12 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.07223">arXiv:1907.07223</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.07223">pdf</a>, <a href="https://arxiv.org/format/1907.07223">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-27615-7_20">10.1007/978-3-030-27615-7_20 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fairness-enhancing interventions in stream classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iosifidis%2C+V">Vasileios Iosifidis</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+T+N+H">Thi Ngoc Han Tran</a>, 
<a href="/search/cs?searchtype=author&amp;query=Ntoutsi%2C+E">Eirini Ntoutsi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.07223v1-abstract-short" style="display: inline;"> The wide spread usage of automated data-driven decision support systems has raised a lot of concerns regarding accountability and fairness of the employed models in the absence of human supervision. Existing fairness-aware approaches tackle fairness as a batch learning problem and aim at learning a fair model which can then be applied to future instances of the problem. In many applications, howev&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.07223v1-abstract-full').style.display = 'inline'; document.getElementById('1907.07223v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.07223v1-abstract-full" style="display: none;"> The wide spread usage of automated data-driven decision support systems has raised a lot of concerns regarding accountability and fairness of the employed models in the absence of human supervision. Existing fairness-aware approaches tackle fairness as a batch learning problem and aim at learning a fair model which can then be applied to future instances of the problem. In many applications, however, the data comes sequentially and its characteristics might evolve with time. In such a setting, it is counter-intuitive to &#34;fix&#34; a (fair) model over the data stream as changes in the data might incur changes in the underlying model therefore, affecting its fairness. In this work, we propose fairness-enhancing interventions that modify the input data so that the outcome of any stream classifier applied to that data will be fair. 
Experiments on real and synthetic data show that our approach achieves good predictive performance and low discrimination scores over the course of the stream. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.07223v1-abstract-full').style.display = 'none'; document.getElementById('1907.07223v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 7 figures. To appear in the proceedings of 30th International Conference on Database and Expert Systems Applications, Linz, Austria August 26 - 29, 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.02182">arXiv:1907.02182</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.02182">pdf</a>, <a href="https://arxiv.org/ps/1907.02182">ps</a>, <a href="https://arxiv.org/format/1907.02182">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JSAC.2019.2927100">10.1109/JSAC.2019.2927100 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Wireless Network Slicing: Generalized Kelly Mechanism Based Resource Allocation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&amp;query=Tun%2C+Y+K">Yan Kyaw Tun</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+D+T">Duy Trong Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.02182v2-abstract-short" style="display: inline;"> Wireless network slicing (i.e., network virtualization) is one of the potential technologies for addressing the issue of rapidly growing demand in mobile data services related to 5G cellular networks. It logically decouples the current cellular networks into two entities; infrastructure providers (InPs) and mobile virtual network operators (MVNOs). The resources of base stations (e.g., resource bl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.02182v2-abstract-full').style.display = 'inline'; document.getElementById('1907.02182v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.02182v2-abstract-full" style="display: none;"> Wireless network slicing (i.e., network virtualization) is one of the potential technologies for addressing the issue of rapidly growing demand in mobile data services related to 5G cellular networks. It logically decouples the current cellular networks into two entities; infrastructure providers (InPs) and mobile virtual network operators (MVNOs). The resources of base stations (e.g., resource blocks, transmission power, antennas) which are owned by the InP are shared to multiple MVNOs who need resources for their mobile users. 
Specifically, the physical resources of an InP are abstracted into multiple isolated network slices, which are then allocated to MVNO&#39;s mobile users. In this paper, two-level allocation problem in network slicing is examined, whilst enabling efficient resource utilization, inter-slice isolation (i.e., no interference amongst slices), and intra-slice isolation (i.e., no interference between users in the same slice). A generalized Kelly mechanism (GKM) is also designed, based on which the upper level of the resource allocation issue (i.e., between the InP and MVNOs) is addressed. The benefit of using such a resource bidding and allocation framework is that the seller (InP) does not need to know the true valuation of the bidders (MVNOs). For solving the lower level of resource allocation issue (i.e., between MVNOs and their mobile users), the optimal resource allocation is derived from each MVNO to its mobile users by using KKT conditions. Then, bandwidth resources are allocated to the users of MVNOs. Finally, the results of simulation are presented to verify the theoretical analysis of our proposed two-level resource allocation problem in wireless network slicing. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.02182v2-abstract-full').style.display = 'none'; document.getElementById('1907.02182v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 13 figures, Accepted in IEEE Journal on Selected Areas in Communications - Special Issue on Network Softwarization &amp; Enablers</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1906.04090">arXiv:1906.04090</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1906.04090">pdf</a>, <a href="https://arxiv.org/ps/1906.04090">ps</a>, <a href="https://arxiv.org/format/1906.04090">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Supervised and Semi-Supervised Learning for MIMO Blind Detection with Low-Resolution ADCs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+L+V">Ly V. Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+D+T">Duy T. Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nghi H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Swindlehurst%2C+A+L">A. Lee Swindlehurst</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+D+H+N">Duy H. N. Nguyen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1906.04090v1-abstract-short" style="display: inline;"> The use of low-resolution analog-to-digital converters (ADCs) is considered to be an effective technique to reduce the power consumption and hardware complexity of wireless transceivers. 
However, in systems with low-resolution ADCs, obtaining channel state information (CSI) is difficult due to significant distortions in the received signals. The primary motivation of this paper is to show that lea&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1906.04090v1-abstract-full').style.display = 'inline'; document.getElementById('1906.04090v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1906.04090v1-abstract-full" style="display: none;"> The use of low-resolution analog-to-digital converters (ADCs) is considered to be an effective technique to reduce the power consumption and hardware complexity of wireless transceivers. However, in systems with low-resolution ADCs, obtaining channel state information (CSI) is difficult due to significant distortions in the received signals. The primary motivation of this paper is to show that learning techniques can mitigate the impact of CSI unavailability. We study the blind detection problem in multiple-input-multiple-output (MIMO) systems with low-resolution ADCs using learning approaches. Two methods, which employ a sequence of pilot symbol vectors as the initial training data, are proposed. The first method exploits the use of a cyclic redundancy check (CRC) to obtain more training data, which helps improve the detection accuracy. The second method is based on the perspective that the to-be-decoded data can itself assist the learning process, so no further training information is required except the pilot sequence. For the case of 1-bit ADCs, we provide a performance analysis of the vector error rate for the proposed methods. Based on the analytical results, a criterion for designing transmitted signals is also presented. Simulation results show that the proposed methods outperform existing techniques and are also more robust. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1906.04090v1-abstract-full').style.display = 'none'; document.getElementById('1906.04090v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 June, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 10 figures, submitted for journal publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.08514">arXiv:1904.08514</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1904.08514">pdf</a>, <a href="https://arxiv.org/format/1904.08514">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> DeepNovoV2: Better de novo peptide sequencing with deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Qiao%2C+R">Rui Qiao</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Ngoc Hieu Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Xin%2C+L">Lei Xin</a>, <a href="/search/cs?searchtype=author&amp;query=Shan%2C+B">Baozhen Shan</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+M">Ming Li</a>, <a href="/search/cs?searchtype=author&amp;query=Ghodsi%2C+A">Ali Ghodsi</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.08514v2-abstract-short" style="display: inline;"> Personalized cancer vaccines are envisioned as the next generation rational cancer immunotherapy. The key step in developing personalized therapeutic cancer vaccines is to identify tumor-specific neoantigens that are on the surface of tumor cells. A promising method for this is through de novo peptide sequencing from mass spectrometry data. In this paper we introduce DeepNovoV2, the state-of-the-a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.08514v2-abstract-full').style.display = 'inline'; document.getElementById('1904.08514v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.08514v2-abstract-full" style="display: none;"> Personalized cancer vaccines are envisioned as the next generation rational cancer immunotherapy. The key step in developing personalized therapeutic cancer vaccines is to identify tumor-specific neoantigens that are on the surface of tumor cells. A promising method for this is through de novo peptide sequencing from mass spectrometry data. In this paper we introduce DeepNovoV2, the state-of-the-art model for peptide sequencing. In DeepNovoV2, a spectrum is directly represented as a set of (m/z, intensity) pairs, therefore it does not suffer from the accuracy-speed/memory trade-off problem. The model combines an order invariant network structure (T-Net) and recurrent neural networks and provides a complete end-to-end training and prediction framework to sequence patterns of peptides. Our experiments on a wide variety of data from different species show that DeepNovoV2 outperforms previous state-of-the-art methods, achieving 13.01-23.95\% higher accuracy at the peptide level. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.08514v2-abstract-full').style.display = 'none'; document.getElementById('1904.08514v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1902.01648">arXiv:1902.01648</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1902.01648">pdf</a>, <a href="https://arxiv.org/format/1902.01648">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/LCOMM.2019.2900044">10.1109/LCOMM.2019.2900044 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> eMBB-URLLC Resource Slicing: A Risk-Sensitive Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Alsenwi%2C+M">Madyan Alsenwi</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Bennis%2C+M">Mehdi Bennis</a>, <a href="/search/cs?searchtype=author&amp;query=Bairagi%2C+A+K">Anupam Kumar Bairagi</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1902.01648v1-abstract-short" style="display: inline;"> Ultra Reliable Low Latency Communication (URLLC) is a 5G New Radio (NR) application that requires strict reliability and latency. URLLC traffic is usually scheduled on top of the ongoing enhanced Mobile Broadband (eMBB) transmissions (i.e., puncturing the current eMBB transmission) and cannot be queued due to its hard latency requirements. In this letter, we propose a risk-sensitive based formulat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.01648v1-abstract-full').style.display = 'inline'; document.getElementById('1902.01648v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1902.01648v1-abstract-full" style="display: none;"> Ultra Reliable Low Latency Communication (URLLC) is a 5G New Radio (NR) application that requires strict reliability and latency. URLLC traffic is usually scheduled on top of the ongoing enhanced Mobile Broadband (eMBB) transmissions (i.e., puncturing the current eMBB transmission) and cannot be queued due to its hard latency requirements. In this letter, we propose a risk-sensitive based formulation to allocate resources to the incoming URLLC traffic while minimizing the risk of the eMBB transmission (i.e., protecting the eMBB users with low data rate) and ensuring URLLC reliability. Specifically, the Conditional Value at Risk (CVaR) is introduced as a risk measure for eMBB transmission. 
Moreover, the reliability constraint of URLLC is formulated as a chance constraint and relaxed based on Markov&#39;s inequality. We decompose the formulated problem into two subproblems in order to transform it into a convex form and then alternatively solve them until convergence. Simulation results show that the proposed approach allocates resources to the incoming URLLC traffic efficiently while satisfying the reliability of both eMBB and URLLC. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.01648v1-abstract-full').style.display = 'none'; document.getElementById('1902.01648v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 February, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.00415">arXiv:1901.00415</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1901.00415">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Deep Autoencoder for Recommender Systems: Parameter Influence Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tran%2C+D+H">Dai Hoang Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Hussain%2C+Z">Zawar Hussain</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+W+E">Wei Emma Zhang</a>, <a 
href="/search/cs?searchtype=author&amp;query=Khoa%2C+N+L+D">Nguyen Lu Dang Khoa</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Sheng%2C+Q+Z">Quan Z. Sheng</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1901.00415v1-abstract-short" style="display: inline;"> Recommender systems have recently attracted many researchers in the deep learning community. The state-of-the-art deep neural network models used in recommender systems are typically multilayer perceptron and deep Autoencoder (DAE), among which DAE usually shows better performance due to its superior capability to reconstruct the inputs. However, we found existing DAE recommendation systems that h&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.00415v1-abstract-full').style.display = 'inline'; document.getElementById('1901.00415v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1901.00415v1-abstract-full" style="display: none;"> Recommender systems have recently attracted many researchers in the deep learning community. The state-of-the-art deep neural network models used in recommender systems are typically multilayer perceptron and deep Autoencoder (DAE), among which DAE usually shows better performance due to its superior capability to reconstruct the inputs. However, we found existing DAE recommendation systems that have similar implementations on similar datasets result in vastly different parameter settings. In this work, we have built a flexible DAE model, named FlexEncoder that uses configurable parameters and unique features to analyse the parameter influences on the prediction accuracy of recommender systems. 
This will help us identify the best-performance parameters given a dataset. Extensive evaluation on the MovieLens datasets are conducted, which drives our conclusions on the influences of DAE parameters. Specifically, we find that DAE parameters strongly affect the prediction accuracy of the recommender systems, and the effect is transferable to similar datasets in a larger size. We open our code to public which could benefit both new users for DAE -- they can quickly understand how DAE works for recommendation systems, and experienced DAE users -- it easier for them to tune the parameters on different datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.00415v1-abstract-full').style.display = 'none'; document.getElementById('1901.00415v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 December, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, ACIS 2018,</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1812.04177">arXiv:1812.04177</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1812.04177">pdf</a>, <a href="https://arxiv.org/format/1812.04177">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Ruin Theory for Dynamic Spectrum Allocation in LTE-U Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Manzoor%2C+A">Aunas Manzoor</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Kazmi%2C+S+M+A">S. M. Ahsan Kazmi</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+S+R">Shashi Raj Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1812.04177v1-abstract-short" style="display: inline;"> LTE in the unlicensed band (LTE-U) is a promising solution to overcome the scarcity of the wireless spectrum. However, to reap the benefits of LTE-U, it is essential to maintain its effective coexistence with WiFi systems. Such a coexistence, hence, constitutes a major challenge for LTE-U deployment. 
In this paper, the problem of unlicensed spectrum sharing among WiFi and LTE-U system is studied.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.04177v1-abstract-full').style.display = 'inline'; document.getElementById('1812.04177v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1812.04177v1-abstract-full" style="display: none;"> LTE in the unlicensed band (LTE-U) is a promising solution to overcome the scarcity of the wireless spectrum. However, to reap the benefits of LTE-U, it is essential to maintain its effective coexistence with WiFi systems. Such a coexistence, hence, constitutes a major challenge for LTE-U deployment. In this paper, the problem of unlicensed spectrum sharing among WiFi and LTE-U system is studied. In particular, a fair time sharing model based on \emph{ruin theory} is proposed to share redundant spectral resources from the unlicensed band with LTE-U without jeopardizing the performance of the WiFi system. Fairness among both WiFi and LTE-U is maintained by applying the concept of the probability of ruin. In particular, the probability of ruin is used to perform efficient duty-cycle allocation in LTE-U, so as to provide fairness to the WiFi system and maintain certain WiFi performance. Simulation results show that the proposed ruin-based algorithm provides better fairness to the WiFi system as compared to equal duty-cycle sharing among WiFi and LTE-U. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.04177v1-abstract-full').style.display = 'none'; document.getElementById('1812.04177v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2018. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in IEEE Communications Letters (09-Dec 2018)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.01548">arXiv:1810.01548</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1810.01548">pdf</a>, <a href="https://arxiv.org/ps/1810.01548">ps</a>, <a href="https://arxiv.org/format/1810.01548">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning Based Caching for Self-Driving Car in Multi-access Edge Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ndikumana%2C+A">Anselme Ndikumana</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1810.01548v2-abstract-short" style="display: inline;"> Once self-driving car becomes a reality and passengers are no longer worry about it, they will need to find new ways of entertainment. However, retrieving entertainment contents at the Data Center (DC) can hinder content delivery service due to high delay of car-to-DC communication. 
To address these challenges, we propose a deep learning based caching for self-driving car, by using Deep Learning a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.01548v2-abstract-full').style.display = 'inline'; document.getElementById('1810.01548v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.01548v2-abstract-full" style="display: none;"> Once self-driving car becomes a reality and passengers are no longer worry about it, they will need to find new ways of entertainment. However, retrieving entertainment contents at the Data Center (DC) can hinder content delivery service due to high delay of car-to-DC communication. To address these challenges, we propose a deep learning based caching for self-driving car, by using Deep Learning approaches deployed on the Multi-access Edge Computing (MEC) structure. First, at DC, Multi-Layer Perceptron (MLP) is used to predict the probabilities of contents to be requested in specific areas. To reduce the car-DC delay, MLP outputs are logged into MEC servers attached to roadside units. Second, in order to cache entertainment contents stylized for car passengers&#39; features such as age and gender, Convolutional Neural Network (CNN) is used to predict age and gender of passengers. Third, each car requests MLP output from MEC server and compares its CNN and MLP outputs by using k-means and binary classification. Through this, the self-driving car can identify the contents need to be downloaded from the MEC server and cached. Finally, we formulate deep learning based caching in the self-driving car that enhances entertainment services as an optimization problem whose goal is to minimize content downloading delay. To solve the formulated problem, a Block Successive Majorization-Minimization (BS-MM) technique is applied. 
The simulation results show that the accuracy of our prediction for the contents need to be cached in the areas of the self-driving car is achieved at 98.04% and our approach can minimize delay. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.01548v2-abstract-full').style.display = 'none'; document.getElementById('1810.01548v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1807.08804">arXiv:1807.08804</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1807.08804">pdf</a>, <a href="https://arxiv.org/format/1807.08804">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Performance">cs.PF</span> </div> </div> <p class="title is-5 mathjax"> GPU-based Commonsense Paradigms Reasoning for Real-Time Query Answering and Multimodal Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen Ha Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Cambria%2C+E">Erik Cambria</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1807.08804v1-abstract-short" style="display: inline;"> We utilize commonsense knowledge bases to address the problem of real- time multimodal analysis. 
In particular, we focus on the problem of multimodal sentiment analysis, which consists in the simultaneous analysis of different modalities, e.g., speech and video, for emotion and polarity detection. Our approach takes advantage of the massively parallel processing power of modern GPUs to enhance&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.08804v1-abstract-full').style.display = 'inline'; document.getElementById('1807.08804v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1807.08804v1-abstract-full" style="display: none;"> We utilize commonsense knowledge bases to address the problem of real-time multimodal analysis. In particular, we focus on the problem of multimodal sentiment analysis, which consists in the simultaneous analysis of different modalities, e.g., speech and video, for emotion and polarity detection. Our approach takes advantage of the massively parallel processing power of modern GPUs to enhance the performance of feature extraction from different modalities. In addition, in order to extract important textual features from multimodal sources we generate domain-specific graphs based on commonsense knowledge and apply GPU-based graph traversal for fast feature detection. Then, powerful ELM classifiers are applied to build the sentiment analysis model based on the extracted features. We conduct our experiments on the YouTube dataset and achieve an accuracy of 78% which outperforms all previous systems. In terms of processing speed, our method shows improvements of several orders of magnitude for feature extraction compared to CPU-based counterparts. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.08804v1-abstract-full').style.display = 'none'; document.getElementById('1807.08804v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 July, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.05430">arXiv:1806.05430</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1806.05430">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.5121/ijcnc.2018.10305">10.5121/ijcnc.2018.10305 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> An Effective Privacy-Preserving Data Coding in Peer-To-Peer Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Ngoc Hong Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Phung%2C+C">Cao-Vien Phung</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+B+Q">Binh Quoc Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Bahri%2C+L">Leila Bahri</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.05430v1-abstract-short" style="display: inline;"> Coding Opportunistically (COPE) is a simple but very effective data coding 
mechanism in the wireless network. However, COPE leaves risks for attackers easily getting the private information saved in the packets, when they move through the network to their destination nodes. Hence in our work, a lightweight cryptographic approach, namely SCOPE, is proposed to consolidate COPE against the honest-but&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.05430v1-abstract-full').style.display = 'inline'; document.getElementById('1806.05430v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.05430v1-abstract-full" style="display: none;"> Coding Opportunistically (COPE) is a simple but very effective data coding mechanism in the wireless network. However, COPE leaves risks for attackers easily getting the private information saved in the packets, when they move through the network to their destination nodes. Hence in our work, a lightweight cryptographic approach, namely SCOPE, is proposed to consolidate COPE against the honest-but-curious and malicious attacks. Honest-but-curious attack serves adversaries who accurately obey the protocol but try to learn as much private information as possible for their curiosity. Additionally, this kind of attack is not destructive consequently. However, it may leave the backdoor for the more dangerous attacks carrying catastrophes to the system. Malicious attack tries to learn not only the private information but also modifies the packet on harmful purposes. In our work, the SCOPE protocol is defensive to the both attacks. The private information in the COPE packet are encrypted by Elliptic Curve Cryptography (ECC), and an additional information is inserted into SCOPE packets served for the authentication process using the lightweight hash Elliptic Curve Digital Signature Algorithm (ECDSA). 
We then prove our new protocol is still guaranteed to be a secure method of data coding, and to be light to effectively operate in the peer-to-peer wireless network <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.05430v1-abstract-full').style.display = 'none'; document.getElementById('1806.05430v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 9 figures, 13 references, 1 table, 3 algorithms, 6 definitions</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> International Journal of Computer Networks &amp; Communications (IJCNC) Vol.10, No.3, May 2018 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1803.11512">arXiv:1803.11512</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1803.11512">pdf</a>, <a href="https://arxiv.org/ps/1803.11512">ps</a>, <a href="https://arxiv.org/format/1803.11512">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Joint Communication, Computation, Caching, and Control in Big Data Multi-access Edge Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ndikumana%2C+A">Anselme Ndikumana</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. 
Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Ho%2C+T+M">Tai Manh Ho</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+Z">Zhu Han</a>, <a href="/search/cs?searchtype=author&amp;query=Saad%2C+W">Walid Saad</a>, <a href="/search/cs?searchtype=author&amp;query=Niyato%2C+D">Dusit Niyato</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1803.11512v1-abstract-short" style="display: inline;"> The concept of multi-access edge computing (MEC) has been recently introduced to supplement cloud computing by deploying MEC servers to the network edge so as to reduce the network delay and alleviate the load on cloud data centers. However, compared to a resourceful cloud, an MEC server has limited resources. When each MEC server operates independently, it cannot handle all of the computational a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.11512v1-abstract-full').style.display = 'inline'; document.getElementById('1803.11512v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1803.11512v1-abstract-full" style="display: none;"> The concept of multi-access edge computing (MEC) has been recently introduced to supplement cloud computing by deploying MEC servers to the network edge so as to reduce the network delay and alleviate the load on cloud data centers. However, compared to a resourceful cloud, an MEC server has limited resources. When each MEC server operates independently, it cannot handle all of the computational and big data demands stemming from the users devices. Consequently, the MEC server cannot provide significant gains in overhead reduction due to data exchange between users devices and remote cloud. 
Therefore, joint computing, caching, communication, and control (4C) at the edge with MEC server collaboration is strongly needed for big data applications. In order to address these challenges, in this paper, the problem of joint 4C in big data MEC is formulated as an optimization problem whose goal is to maximize the bandwidth saving while minimizing delay, subject to the local computation capability of user devices, computation deadline, and MEC resource constraints. However, the formulated problem is shown to be non-convex. To make this problem convex, a proximal upper bound problem of the original formulated problem that guarantees descent to the original problem is proposed. To solve the proximal upper bound problem, a block successive upper bound minimization (BSUM) method is applied. Simulation results show that the proposed approach increases bandwidth-saving and minimizes delay while satisfying the computation deadlines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.11512v1-abstract-full').style.display = 'none'; document.getElementById('1803.11512v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 March, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2018. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1803.00683">arXiv:1803.00683</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1803.00683">pdf</a>, <a href="https://arxiv.org/format/1803.00683">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Decentralized Computation Offloading and Resource Allocation in Heterogeneous Networks with Mobile Edge Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pham%2C+Q">Quoc-Viet Pham</a>, <a href="/search/cs?searchtype=author&amp;query=LeAnh%2C+T">Tuan LeAnh</a>, <a href="/search/cs?searchtype=author&amp;query=Tran%2C+N+H">Nguyen H. Tran</a>, <a href="/search/cs?searchtype=author&amp;query=Hong%2C+C+S">Choong Seon Hong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1803.00683v1-abstract-short" style="display: inline;"> We consider a heterogeneous network with mobile edge computing, where a user can offload its computation to one among multiple servers. In particular, we minimize the system-wide computation overhead by jointly optimizing the individual computation decisions, transmit power of the users, and computation resource at the servers. 
The crux of the problem lies in the combinatorial nature of multi-user&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.00683v1-abstract-full').style.display = 'inline'; document.getElementById('1803.00683v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1803.00683v1-abstract-full" style="display: none;"> We consider a heterogeneous network with mobile edge computing, where a user can offload its computation to one among multiple servers. In particular, we minimize the system-wide computation overhead by jointly optimizing the individual computation decisions, transmit power of the users, and computation resource at the servers. The crux of the problem lies in the combinatorial nature of multi-user offloading decisions, the complexity of the optimization objective, and the existence of inter-cell interference. Then, we decompose the underlying problem into two subproblems: i) the offloading decision, which includes two phases of user association and subchannel assignment, and ii) joint resource allocation, which can be further decomposed into the problems of transmit power and computation resource allocation. To enable distributed computation offloading, we sequentially apply a many-to-one matching game for user association and a one-to-one matching game for subchannel assignment. Moreover, the transmit power of offloading users is found using a bisection method with approximate inter-cell interference, and the computation resources allocated to offloading users is achieved via the duality approach. The proposed algorithm is shown to converge and is stable. Finally, we provide simulations to validate the performance of the proposed algorithm as well as comparisons with the existing frameworks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.00683v1-abstract-full').style.display = 'none'; document.getElementById('1803.00683v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 March, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to IEEE Journal</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Tran%2C+N+H&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Tran%2C+N+H&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Tran%2C+N+H&amp;start=50" class="pagination-link" aria-label="Goto page 2">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" 
role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 
0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 
21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10