Search | arXiv e-print repository

Showing 1&ndash;50 of 75 results for author: Rabiee, H R

Searching in archive cs (a search across all archives is also available). Results are sorted by announcement date, newest first, 50 per page; this is page 1 of 2.

1. Detecting Viral Social Events through Censored Observation with Deep Survival Analysis
   arXiv:2410.01320 [pdf, other] (cs.SI)
   Authors: Maryam Ramezani, Hossein Goli, AmirMohammad Izad, Hamid R. Rabiee
   Abstract: Users' increasing activity across social networks has made them the most widely used platforms for exchanging and propagating information. To spread information, a user first shares it on a social network, and other users in direct contact with that user may share it in turn; the information expands throughout the network as this process repeats. A piece of information that becomes popular and is repeatedly shared by different individuals is called a viral event. Identifying and analyzing viral social events yields valuable insight into the dynamics of information dissemination within a network and, more importantly, enables proactive approaches: by observing the dissemination pattern of a piece of information in the early stages of its expansion, it becomes possible to determine whether the cascade will become viral in the future. This research aims to predict and detect viral events in social networks by observing granular information and using a deep survival analysis-based method. The model can play a significant role in identifying rumors, predicting the impact of information, and assisting optimal decision-making in information management and marketing. The proposed method is evaluated on real-world datasets from Twitter, Weibo, and Digg.
   Submitted 2 October, 2024; originally announced October 2024.
   Comments: 11 pages, 3 figures, 6 tables.
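To make the survival-analysis framing concrete, here is a minimal, self-contained sketch of a discrete-time deep survival model with right-censored targets, one common way to set up this kind of "time until a cascade goes viral" prediction. It is not the authors' architecture; the feature dimension, network, and horizon are placeholder assumptions.

```python
# Minimal sketch of discrete-time survival prediction with right-censoring.
import torch
import torch.nn as nn

class DiscreteTimeSurvival(nn.Module):
    def __init__(self, n_features: int, horizon: int):
        super().__init__()
        # Maps early-cascade features to a per-time-step hazard logit.
        self.net = nn.Sequential(
            nn.Linear(n_features, 64), nn.ReLU(), nn.Linear(64, horizon)
        )

    def forward(self, x):
        return torch.sigmoid(self.net(x))  # hazards h_t in (0, 1), shape (B, T)

def censored_nll(hazards, event_time, observed):
    """Negative log-likelihood with right-censoring.
    hazards: (B, T); event_time: (B,) step of the event or of censoring;
    observed: (B,) 1.0 if virality was observed, 0.0 if the cascade is censored."""
    B, T = hazards.shape
    t = torch.arange(T).unsqueeze(0)                    # (1, T)
    survived = (t < event_time.unsqueeze(1)).float()    # steps survived so far
    log_surv = (survived * torch.log(1 - hazards + 1e-8)).sum(dim=1)
    log_event = torch.log(hazards[torch.arange(B), event_time] + 1e-8)
    # Observed cascades contribute the event term; censored ones only survival.
    return -(log_surv + observed * log_event).mean()

# Toy usage: 32 cascades, 10 early-observation features, a 20-step horizon.
model = DiscreteTimeSurvival(n_features=10, horizon=20)
x = torch.randn(32, 10)
event_time = torch.randint(0, 20, (32,))
observed = torch.randint(0, 2, (32,)).float()
loss = censored_nll(model(x), event_time, observed)
loss.backward()
```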
2. LLM-Powered Grapheme-to-Phoneme Conversion: Benchmark and Case Study
   arXiv:2409.08554 [pdf, other] (cs.CL)
   Authors: Mahta Fetrat Qharabagh, Zahra Dehghanian, Hamid R. Rabiee
   Abstract: Grapheme-to-phoneme (G2P) conversion is critical in speech processing, particularly for applications like speech synthesis. G2P systems must possess linguistic understanding and contextual awareness of languages with polyphone words and context-dependent phonemes. Large language models (LLMs) have recently demonstrated significant potential in various language tasks, suggesting that their phonetic knowledge could be leveraged for G2P. In this paper, we evaluate the performance of LLMs in G2P conversion and introduce prompting and post-processing methods that enhance LLM outputs without additional training or labeled data. We also present a benchmarking dataset designed to assess G2P performance on sentence-level phonetic challenges of the Persian language. Our results show that by applying the proposed methods, LLMs can outperform traditional G2P tools, even in an underrepresented language like Persian, highlighting the potential of developing LLM-aided G2P systems.
   Submitted 13 September, 2024; originally announced September 2024.
   Comments: 5 pages, 5 figures.

3. ManaTTS Persian: a recipe for creating TTS datasets for lower resource languages
   arXiv:2409.07259 [pdf, other] (cs.SD, eess.AS)
   Authors: Mahta Fetrat Qharabagh, Zahra Dehghanian, Hamid R. Rabiee
   Abstract: In this study, we introduce ManaTTS, the most extensive publicly accessible single-speaker Persian corpus, and a comprehensive framework for collecting transcribed speech datasets for the Persian language. ManaTTS, released under the open CC-0 license, comprises approximately 86 hours of audio with a sampling rate of 44.1 kHz. Alongside ManaTTS, we also generated the VirgoolInformal dataset, extending over 5 hours of audio, to evaluate the Persian speech recognition models used for forced alignment. The datasets are supported by a fully transparent, MIT-licensed pipeline that includes tools for sentence tokenization, bounded audio segmentation, and a novel forced alignment method designed specifically for low-resource languages. With this dataset, we trained a Tacotron2-based TTS model, achieving a Mean Opinion Score (MOS) of 3.76, remarkably close to the MOS of 3.86 for utterances generated by the same vocoder from natural spectrograms and the MOS of 4.01 for the natural waveform, demonstrating the quality and effectiveness of the corpus.
   Submitted 11 September, 2024; originally announced September 2024.
   Comments: 33 pages, 12 figures.

4. Nonlinear Perturbation-based Non-Convex Optimization over Time-Varying Networks
   arXiv:2408.02269 [pdf, other] (eess.SY, cs.DC, eess.SP, math.OC)
   Authors: Mohammadreza Doostmohammadian, Zulfiya R. Gabidullina, Hamid R. Rabiee
   Abstract: Decentralized optimization strategies are helpful for various applications, from networked estimation to distributed machine learning. This paper studies finite-sum minimization problems described over a network of nodes and proposes a computationally efficient algorithm that solves distributed convex problems and optimally finds the solution to locally non-convex objective functions. In contrast to batch gradient optimization in some literature, our algorithm operates on a single time scale with no extra inner consensus loop and evaluates one gradient entry per node per time step. Further, the algorithm addresses link-level nonlinearity representing, for example, logarithmic quantization of the exchanged data or clipping of the exchanged data bits. Leveraging perturbation-based theory and algebraic Laplacian network analysis, we prove optimal convergence and dynamics stability over time-varying and switching networks, where the time variation may be due to packet drops or link failures. Despite the nonlinear nature of the dynamics, we prove exact convergence in the face of odd, sign-preserving, sector-bound nonlinear data transmission over the links. Illustrative numerical simulations further highlight our contributions.
   Submitted 5 August, 2024; originally announced August 2024.
   Comments: IEEE Transactions on Network Science and Engineering.
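As a rough illustration of the setting (not the paper's algorithm), the sketch below runs a consensus-plus-gradient update on a toy quadratic finite-sum problem in which every value exchanged over a link passes through an odd, sign-preserving nonlinearity (here, clipping) and the topology is redrawn at every step; the node states still settle near the network-wide optimum. Step sizes and the topology generator are illustrative assumptions.

```python
# Decentralized minimization of sum_i 0.5*(x - targets[i])^2 with nonlinear links.
import numpy as np

rng = np.random.default_rng(0)
n = 10                                    # number of nodes
targets = rng.normal(size=n)              # local data defining each f_i
x = rng.normal(size=n)                    # local states, one scalar per node
clip = lambda v: np.clip(v, -0.5, 0.5)    # odd, sign-preserving link nonlinearity

def time_varying_topology(n, rng):
    """A ring plus a few random extra edges, re-drawn at every iteration."""
    A = np.zeros((n, n))
    for i in range(n):
        A[i, (i + 1) % n] = A[(i + 1) % n, i] = 1.0
    for _ in range(3):
        i, j = rng.choice(n, size=2, replace=False)
        A[i, j] = A[j, i] = 1.0
    return A

beta = 0.05                               # consensus gain
for k in range(2000):
    A = time_varying_topology(n, rng)
    alpha = 1.0 / (k + 20)                # diminishing local gradient step size
    grad = x - targets                    # gradients of the local quadratics
    # Each node mixes nonlinearly distorted disagreements with its current
    # neighbors and takes a local gradient step.
    consensus = np.array([sum(A[i, j] * clip(x[j] - x[i]) for j in range(n))
                          for i in range(n)])
    x = x + beta * consensus - alpha * grad

print("disagreement:", x.max() - x.min())
print("estimate vs. optimum:", x.mean(), targets.mean())
```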
5. How Clustering Affects the Convergence of Decentralized Optimization over Networks: A Monte-Carlo-based Approach
   arXiv:2407.01460 [pdf, other] (cs.SI, eess.SY, math.OC)
   Authors: Mohammadreza Doostmohammadian, Shahaboddin Kharazmi, Hamid R. Rabiee
   Abstract: Decentralized algorithms have gained substantial interest owing to advancements in cloud computing, the Internet of Things (IoT), intelligent transportation networks, and parallel processing over sensor networks. The convergence of such algorithms is directly related to specific properties of the underlying network topology; in particular, the clustering coefficient is known to affect, for example, the controllability/observability and the epidemic growth over networks. In this work, we study the effects of the clustering coefficient on the convergence rate of networked optimization approaches. We model the structure of large-scale distributed systems by random scale-free (SF) and clustered scale-free (CSF) networks and compare the convergence rate while tuning the network clustering coefficient and keeping other relevant network properties (such as power-law degree distribution, number of links, and average degree) unchanged. Monte-Carlo-based simulations are used to compare the convergence rate over many trials of SF graph topologies. Furthermore, to study the convergence rate in real case studies, we compare the clustering coefficient of some real-world networks with the eigenspectrum of the underlying network (as a measure of convergence rate). The results show a higher convergence rate over low-clustered networks. This is significant, as one can improve the learning rate of many existing decentralized machine-learning scenarios by tuning the network clustering.
   Submitted 1 July, 2024; originally announced July 2024.
   Comments: SNAM Journal.
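A small Monte-Carlo experiment in the spirit of the abstract can be run with networkx: generate scale-free and clustered scale-free graphs of the same size and average degree, then compare their average clustering coefficient against the algebraic connectivity (the second-smallest Laplacian eigenvalue), a standard proxy for consensus convergence rate. The graph models and parameters below are illustrative assumptions, not the paper's exact setup.

```python
# Compare clustering coefficient vs. algebraic connectivity over random graphs.
import networkx as nx
import numpy as np

def monte_carlo(builder, trials=20):
    clus, lam2 = [], []
    for seed in range(trials):
        G = builder(seed)
        clus.append(nx.average_clustering(G))
        lam2.append(nx.algebraic_connectivity(G))
    return np.mean(clus), np.mean(lam2)

n, m = 200, 3   # nodes and edges attached per new node (same average degree)
sf  = lambda s: nx.barabasi_albert_graph(n, m, seed=s)          # low clustering
csf = lambda s: nx.powerlaw_cluster_graph(n, m, p=0.9, seed=s)  # high clustering

for name, builder in [("scale-free", sf), ("clustered scale-free", csf)]:
    c, l2 = monte_carlo(builder)
    print(f"{name:>22}: avg clustering = {c:.3f}, algebraic connectivity = {l2:.3f}")
```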
6. SuperPos-Prompt: Enhancing Soft Prompt Tuning of Language Models with Superposition of Multi Token Embeddings
   arXiv:2406.05279 [pdf, other] (cs.CL, cs.AI)
   Authors: MohammadAli SadraeiJavaeri, Ehsaneddin Asgari, Alice Carolyn McHardy, Hamid Reza Rabiee
   Abstract: Soft prompt tuning techniques have recently gained traction as an effective strategy for the parameter-efficient tuning of pretrained language models, particularly minimizing the required adjustment of model parameters. Despite their growing use, achieving optimal tuning with soft prompts, especially for smaller datasets, remains a substantial challenge. This study makes two contributions in this domain: (i) we introduce SuperPos-Prompt, a new reparameterization technique employing the superposition of multiple pretrained vocabulary embeddings to improve the learning of soft prompts. Our experiments across several GLUE and SuperGLUE benchmarks consistently highlight SuperPos-Prompt's superiority over Residual Prompt tuning, exhibiting an average score increase of +6.4 in T5-Small and +5.0 in T5-Base along with faster convergence; remarkably, SuperPos-Prompt occasionally outperforms even full fine-tuning methods. (ii) We also demonstrate enhanced performance and rapid convergence by omitting dropout from the frozen network, yielding consistent improvements across various scenarios and tuning methods.
   Submitted 7 June, 2024; originally announced June 2024.
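The core reparameterization idea can be sketched in a few lines of PyTorch: each soft-prompt token is a learnable weighted superposition of a fixed subset of the frozen model's vocabulary embeddings, so only the small mixing matrix is trained. The basis size, softmax mixing, and dimensions below are illustrative assumptions, not the authors' implementation.

```python
# Soft prompt parameterized as a superposition of frozen vocabulary embeddings.
import torch
import torch.nn as nn

class SuperpositionPrompt(nn.Module):
    def __init__(self, vocab_embeddings: torch.Tensor, prompt_len: int, n_basis: int = 128):
        super().__init__()
        # Sample a fixed basis of pretrained token embeddings (kept frozen).
        idx = torch.randperm(vocab_embeddings.size(0))[:n_basis]
        self.register_buffer("basis", vocab_embeddings[idx].detach())  # (n_basis, d)
        # Learnable mixing weights: one weight vector per prompt position.
        self.mix = nn.Parameter(torch.zeros(prompt_len, n_basis))

    def forward(self) -> torch.Tensor:
        # Softmax keeps each prompt token inside the convex hull of the basis.
        return torch.softmax(self.mix, dim=-1) @ self.basis  # (prompt_len, d)

# Usage: prepend the prompt to the embedded input of a frozen language model.
vocab = torch.randn(32000, 512)            # stand-in for a frozen embedding table
prompt = SuperpositionPrompt(vocab, prompt_len=10)
inputs = torch.randn(4, 20, 512)           # embedded input tokens (batch, seq, d)
full = torch.cat([prompt().expand(4, -1, -1), inputs], dim=1)  # (4, 30, 512)
```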
7. Privacy Challenges in Meta-Learning: An Investigation on Model-Agnostic Meta-Learning
   arXiv:2406.00249 [pdf, other] (cs.LG, cs.CR)
   Authors: Mina Rafiei, Mohammadmahdi Maheri, Hamid R. Rabiee
   Abstract: Meta-learning involves multiple learners, each dedicated to specific tasks, collaborating in a data-constrained setting. In current meta-learning methods, task learners locally learn models from sensitive data, termed support sets, and then share model-related information, such as gradients or loss values computed on another part of the data termed the query set, with a meta-learner, which uses this information to update its meta-knowledge. Despite the absence of explicit data sharing, privacy concerns persist. This paper examines potential data leakage in a prominent meta-learning algorithm, Model-Agnostic Meta-Learning (MAML), in which gradients are shared between the meta-learner and task learners. The primary objective is to scrutinize the gradient and the information it encompasses about the task dataset. We then propose membership inference attacks targeting the task dataset, containing both support and query sets, and explore various noise injection methods designed to safeguard the privacy of task data and thwart potential attacks. Experimental results demonstrate the effectiveness of these attacks on MAML and the efficacy of proper noise injection methods in countering them.
   Submitted 31 May, 2024; originally announced June 2024.
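A minimal sketch of the kind of defense the abstract refers to, under assumed details: the task learner clips its query-set gradients to a global norm and adds Gaussian noise before sharing them with the meta-learner, limiting what a membership-inference adversary can recover about individual examples. This is a generic clip-and-noise routine, not the paper's specific mechanism.

```python
# Clip-and-noise a list of gradients before sharing them with a meta-learner.
import torch

def privatize_gradients(grads, clip_norm=1.0, noise_std=0.05):
    """Clip the gradient list to a global L2 norm, then add Gaussian noise."""
    total = torch.sqrt(sum(g.pow(2).sum() for g in grads))
    scale = torch.clamp(clip_norm / (total + 1e-12), max=1.0)
    return [g * scale + noise_std * torch.randn_like(g) for g in grads]

# Toy usage: gradients computed on a "query set" of a small linear model.
model = torch.nn.Linear(10, 1)
x, y = torch.randn(16, 10), torch.randn(16, 1)
loss = torch.nn.functional.mse_loss(model(x), y)
grads = torch.autograd.grad(loss, list(model.parameters()))
shared = privatize_gradients(list(grads))   # what would be sent to the meta-learner
```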
8. HGTDR: Advancing Drug Repurposing with Heterogeneous Graph Transformers
   arXiv:2405.08031 (cs.LG, cs.AI, q-bio.QM)
   Authors: Ali Gharizadeh, Karim Abbasi, Amin Ghareyazi, Mohammad R. K. Mofrad, Hamid R. Rabiee
   Abstract: Motivation: Drug repurposing is a viable solution for reducing the time and cost associated with drug development. However, the drug repurposing approaches proposed thus far have yet to meet expectations, so it is crucial to offer a systematic approach to drug repurposing that achieves cost savings and enhances human lives. In recent years, biological network-based methods for drug repurposing have generated promising results. Nevertheless, these methods have limitations. Primarily, their scope is generally limited in the size and variety of data they can handle effectively. Another issue arises from the treatment of heterogeneous data, which must either be addressed or converted into homogeneous data, leading to a loss of information. A significant drawback is that most of these approaches lack end-to-end functionality, necessitating manual implementation and expert knowledge at certain stages. Results: We propose a new solution, HGTDR (Heterogeneous Graph Transformer for Drug Repurposing), to address the challenges associated with drug repurposing. HGTDR is a three-step approach for knowledge-graph-based drug repurposing: 1) constructing a heterogeneous knowledge graph, 2) utilizing a heterogeneous graph transformer network, and 3) computing relationship scores using a fully connected network. By leveraging HGTDR, users gain the ability to manipulate input graphs, extract information from diverse entities, and obtain their desired output. In the evaluation step, we demonstrate that HGTDR performs comparably to previous methods. Furthermore, we review medical studies to validate our method's top ten drug repurposing suggestions, which have exhibited promising results. We also demonstrate HGTDR's capability to predict other types of relations, such as drug-protein and disease-protein interactions, through numerical and experimental validation.
   Submitted 18 May, 2024; v1 submitted 12 May, 2024; originally announced May 2024.
   Comments: The paper has been archived without having permission from all authors. Please withdraw.

9. PLA-SGCN: Protein-Ligand Binding Affinity Prediction by Integrating Similar Pairs and Semi-supervised Graph Convolutional Network
   arXiv:2405.07452 (q-bio.QM, cs.LG)
   Authors: Karim Abbasi, Parvin Razzaghi, Amin Ghareyazi, Hamid R. Rabiee
   Abstract: The goal of protein-ligand binding affinity (PLA) prediction is to predict whether or not a ligand can bind to a protein sequence. Recently, deep learning has received much attention in PLA prediction. Deep learning-based approaches involve two steps: a feature extraction step and a task prediction step. Many such approaches concentrate on introducing new feature extraction networks or integrating auxiliary knowledge, such as protein-protein interaction networks or gene ontology knowledge; the task prediction network is then designed simply from a few fully connected layers. This paper aims to integrate retrieved similar hard protein-ligand pairs into PLA prediction (i.e., the task prediction step) using a semi-supervised graph convolutional network (GCN). Hard protein-ligand pairs are retrieved for each input query sample based on a manifold smoothness constraint. Then, a graph is learned automatically in which each node is a protein-ligand pair and each edge represents the similarity between pairs. In other words, an end-to-end framework is proposed that simultaneously retrieves hard similar samples, learns the protein-ligand descriptor, learns the graph topology of the input sample with the retrieved similar hard samples (i.e., learns the adjacency matrix), and learns a semi-supervised GCN to predict the binding affinity (as the task predictor). The training step adjusts the parameter values, and in the inference step the learned model is fine-tuned for each input sample. The proposed approach is evaluated on the four well-known PDBbind, Davis, KIBA, and BindingDB datasets, and the results show that it performs significantly better than comparable approaches.
   Submitted 18 May, 2024; v1 submitted 12 May, 2024; originally announced May 2024.
   Comments: The paper has been archived without permission from all authors. Please withdraw.

10. CRISPR: Ensemble Model
   arXiv:2403.03018 [pdf, other] (cs.LG, q-bio.GN)
   Authors: Mohammad Rostami, Amin Ghariyazi, Hamed Dashti, Mohammad Hossein Rohban, Hamid R. Rabiee
   Abstract: Clustered Regularly Interspaced Short Palindromic Repeats (CRISPR) is a gene editing technology that has revolutionized the fields of biology and medicine. However, one of the challenges of using CRISPR is predicting the on-target efficacy and off-target sensitivity of single-guide RNAs (sgRNAs), because most existing methods are trained on separate datasets with different genes and cells, which limits their generalizability. In this paper, we propose a novel ensemble learning method for sgRNA design that is accurate and generalizable. Our method combines the predictions of multiple machine learning models to produce a single, more robust prediction. This approach allows us to learn from a wider range of data, which improves the generalizability of our model. We evaluated our method on a benchmark dataset of sgRNA designs and found that it outperformed existing methods in terms of both accuracy and generalizability. Our results suggest that our method can be used to design sgRNAs with high sensitivity and specificity, even for new genes or cells. This could have important implications for the clinical use of CRISPR, as it would allow researchers to design more effective and safer treatments for a variety of diseases.
   Submitted 5 March, 2024; originally announced March 2024.
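The ensembling idea itself can be sketched generically: several base regressors, possibly trained on different sgRNA datasets, are combined by averaging their predicted efficacies. The features, base models, and data below are stand-ins for illustration; they are not the paper's actual learners or datasets.

```python
# Generic prediction ensemble: average several regressors trained on different data.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
# Stand-in features (e.g., encoded sgRNA sequences) and efficacy labels.
X_a, y_a = rng.normal(size=(300, 80)), rng.uniform(size=300)
X_b, y_b = rng.normal(size=(300, 80)), rng.uniform(size=300)

# Each base model can be trained on a different dataset (different genes/cells).
models = [
    GradientBoostingRegressor(random_state=0).fit(X_a, y_a),
    RandomForestRegressor(n_estimators=100, random_state=0).fit(X_b, y_b),
    Ridge(alpha=1.0).fit(np.vstack([X_a, X_b]), np.concatenate([y_a, y_b])),
]

def ensemble_predict(X):
    """Average the base models' predictions into one ensemble score."""
    return np.mean([m.predict(X) for m in models], axis=0)

print(ensemble_predict(rng.normal(size=(5, 80))))
```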
11. Epidemic modeling and flattening the infection curve in social networks
   arXiv:2311.15227 [pdf] (cs.SI, physics.soc-ph)
   Authors: Mohammadreza Doostmohammadian, Soraya Doustmohamadian, Najmeh Doostmohammadian, Azam Doustmohammadian, Houman Zarrabi, Hamid R. Rabiee
   Abstract: The main goal of this paper is to model epidemics and flatten the infection curve in social networks. Flattening the infection curve implies slowing down the spread of the disease and reducing the infection rate via social distancing, isolation (quarantine), and vaccination. Non-pharmaceutical methods are a much simpler and more efficient way to control the spread of an epidemic and the infection rate: by specifying a target group with high centrality for isolation and quarantine, one can reach a much flatter infection curve (related to Corona, for example) without adding extra costs to health services. The aim of this research is, first, to model the epidemic and, then, to give strategies and structural algorithms for targeted vaccination or targeted non-pharmaceutical methods that reduce the peak of the viral disease and flatten the infection curve. Because finding the target quarantine group makes flattening the infection curve much easier, these methods are especially efficient as non-pharmaceutical interventions. For this purpose, a small number of particular nodes with high centrality are isolated and the infection curve is analyzed. Our research shows meaningful results for flattening the infection curve by isolating only a small number of targeted nodes in the social network. The proposed methods are independent of the type of disease and are effective for any viral disease, e.g., Covid-19.
   Submitted 26 November, 2023; originally announced November 2023.
   Comments: In Persian. Journal of Modelling in Engineering 2023.
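A toy experiment in this spirit (with assumed parameters and a small-world graph as an example, not the paper's model) can be run with networkx: simulate a discrete-time SIR process on a network and compare the epidemic peak with and without isolating the few highest-centrality nodes before the outbreak.

```python
# Compare the SIR epidemic peak with and without centrality-targeted isolation.
import networkx as nx
import numpy as np

def sir_peak(G, beta=0.05, gamma=0.1, steps=200, seed=0):
    rng = np.random.default_rng(seed)
    status = {v: "S" for v in G}                 # everyone starts susceptible
    for v in rng.choice(list(G.nodes), size=5, replace=False):
        status[v] = "I"                          # seed infections
    peak = 0
    for _ in range(steps):
        new_status = dict(status)
        for v in G:
            if status[v] == "I":
                for u in G.neighbors(v):
                    if status[u] == "S" and rng.random() < beta:
                        new_status[u] = "I"      # transmission along an edge
                if rng.random() < gamma:
                    new_status[v] = "R"          # recovery
        status = new_status
        peak = max(peak, sum(s == "I" for s in status.values()))
    return peak

G = nx.watts_strogatz_graph(500, k=8, p=0.1, seed=1)   # small-world network
centrality = nx.degree_centrality(G)
targets = sorted(centrality, key=centrality.get, reverse=True)[:15]
G_isolated = G.copy()
G_isolated.remove_nodes_from(targets)                   # isolate/quarantine targets

print("peak infections, no intervention:", sir_peak(G))
print("peak infections, targeted isolation:", sir_peak(G_isolated))
```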
12. Distributed Delay-Tolerant Strategies for Equality-Constraint Sum-Preserving Resource Allocation
   arXiv:2310.18225 [pdf, other] (eess.SY, cs.MA, math.OC)
   Authors: Mohammadreza Doostmohammadian, Alireza Aghasi, Maria Vrakopoulou, Hamid R. Rabiee, Usman A. Khan, Themistoklis Charalambou
   Abstract: This paper proposes two nonlinear dynamics to solve a constrained distributed optimization problem for resource allocation over a multi-agent network. In this setup, the coupling constraint refers to the resource-demand balance, which is preserved at all times. The proposed solutions can address various model nonlinearities, for example due to quantization and/or saturation, and allow faster convergence or a solution that is robust against impulsive noise or uncertainties. We prove convergence over weakly connected networks using convex analysis and Lyapunov theory; our findings show that convergence can be reached for general sign-preserving odd nonlinearities. We further propose delay-tolerant mechanisms to handle general bounded heterogeneous time-varying delays over the communication network of agents while preserving all-time feasibility. This work finds application in CPU scheduling and coverage control, among others. The paper advances the state of the art by addressing (i) possible nonlinearity on the agents/links while handling (ii) resource-demand feasibility at all times, (iii) uniform connectivity instead of all-time connectivity, and (iv) possible heterogeneous and time-varying delays. To the best of our knowledge, no existing work addresses contributions (i)-(iv) altogether. Simulations and comparative analysis are provided to corroborate our contributions.
   Submitted 27 October, 2023; originally announced October 2023.
   Journal ref: SCL 2023.

13. Infection Curve Flattening via Targeted Interventions and Self-Isolation
   arXiv:2310.12594 [pdf, other] (cs.SI, eess.SY, physics.soc-ph)
   Authors: Mohammadreza Doostmohammadian, Houman Zarrabi, Azam Doustmohammadian, Hamid R. Rabiee
   Abstract: Understanding the impact of network clustering and small-world properties on epidemic spread can be crucial in developing effective strategies for managing and controlling infectious diseases. In this work, we study the impact of these network features on targeted interventions (e.g., self-isolation and quarantine), where the individuals targeted for self-isolation are chosen based on centrality measures and node influence metrics. In contrast to our previous works on scale-free networks, small-world networks are considered in this paper. Small-world networks resemble real-world social and human networks: most nodes are not directly connected but can be reached through a few intermediaries (the small-worldness property), and real social networks, such as friendship networks, exhibit the same behavior, with most people connected through a relatively small number of intermediaries. We particularly study epidemic curve flattening via centrality-based interventions/isolation over small-world networks. Our results show that high clustering combined with low small-worldness (higher shortest-path characteristics) implies flatter infection curves. In practice, a flatter infection curve means that new cases of a disease are spread out over a longer period of time rather than rising in a sharp, sudden peak, which reduces the strain on healthcare resources and helps to relieve healthcare services.
   Submitted 19 October, 2023; originally announced October 2023.
   Journal ref: SNAM 2023.

14. Epsilon non-Greedy: A Bandit Approach for Unbiased Recommendation via Uniform Data
   arXiv:2310.04855 [pdf, other] (cs.LG)
   Authors: S. M. F. Sani, Seyed Abbas Hosseini, Hamid R. Rabiee
   Abstract: Often, recommendation systems employ continuous training, leading to a self-feedback loop bias in which the system becomes biased toward its previous recommendations. Recent studies have attempted to mitigate this bias by collecting small amounts of unbiased data&hellip;
While these studies have successfully developed less biased models, they ignore the crucial fact that the recommendations generated by&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.04855v1-abstract-full').style.display = 'inline'; document.getElementById('2310.04855v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.04855v1-abstract-full" style="display: none;"> Often, recommendation systems employ continuous training, leading to a self-feedback loop bias in which the system becomes biased toward its previous recommendations. Recent studies have attempted to mitigate this bias by collecting small amounts of unbiased data. While these studies have successfully developed less biased models, they ignore the crucial fact that the recommendations generated by the model serve as the training data for subsequent training sessions. To address this issue, we propose a framework that learns an unbiased estimator using a small amount of uniformly collected data and focuses on generating improved training data for subsequent training iterations. To accomplish this, we view recommendation as a contextual multi-arm bandit problem and emphasize on exploring items that the model has a limited understanding of. We introduce a new offline sequential training schema that simulates real-world continuous training scenarios in recommendation systems, offering a more appropriate framework for studying self-feedback bias. We demonstrate the superiority of our model over state-of-the-art debiasing methods by conducting extensive experiments using the proposed training schema. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.04855v1-abstract-full').style.display = 'none'; document.getElementById('2310.04855v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.01696">arXiv:2310.01696</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.01696">pdf</a>, <a href="https://arxiv.org/format/2310.01696">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> DANI: Fast Diffusion Aware Network Inference with Preserving Topological Structure Property </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ramezani%2C+M">Maryam Ramezani</a>, <a href="/search/cs?searchtype=author&amp;query=Ahadinia%2C+A">Aryan Ahadinia</a>, <a href="/search/cs?searchtype=author&amp;query=Farhadi%2C+E">Erfan Farhadi</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. 
Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2310.01696v1-abstract-full"> The fast growth of social networks and their data access limitations in recent years have made it increasingly difficult to obtain the complete topology of these networks. However, diffusion information over these networks is available, and many algorithms have been proposed to infer the underlying networks using this information. The previously proposed algorithms focus only on inferring more links and ignore preserving the critical topological characteristics of the underlying social networks. In this paper, we propose a novel method called DANI to infer the underlying network while preserving its structural properties. It is based on the Markov transition matrix derived from time-series cascades, as well as the node-node similarity that can be observed in the cascade behavior from a structural point of view. In addition, the presented method has linear time complexity (it scales linearly with the number of nodes and the number of cascades, and with the square of the average cascade length), and its distributed version in the MapReduce framework is also scalable. We applied the proposed approach to both real and synthetic networks. The experimental results showed that DANI achieves higher accuracy and lower run time than well-known network inference methods while maintaining structural properties, including modular structure, degree distribution, connected components, density, and clustering coefficients. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:1706.00941</span> </p> </li>
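The DANI entry above leans on two ingredients: a Markov transition matrix derived from the activation order in cascades and a structural node-node similarity. The sketch below is a hypothetical illustration of how such statistics can be assembled from cascade logs; it is not the DANI algorithm, and the final edge-scoring rule is an arbitrary placeholder.

```python
# Hypothetical sketch of the kind of cascade statistics a diffusion-aware
# network-inference method can build on: a row-normalized transition matrix
# from activation order, plus cosine similarity of node participation vectors.
# This is NOT the DANI algorithm itself; names and scoring are illustrative.
import numpy as np

def cascade_statistics(cascades, n_nodes):
    """cascades: list of node-id lists, each ordered by activation time."""
    transitions = np.zeros((n_nodes, n_nodes))
    participation = np.zeros((n_nodes, len(cascades)))
    for c_idx, cascade in enumerate(cascades):
        for u in cascade:
            participation[u, c_idx] = 1.0
        for u, v in zip(cascade, cascade[1:]):   # consecutive activations
            transitions[u, v] += 1.0
    row_sums = transitions.sum(axis=1, keepdims=True)
    transition_matrix = np.divide(transitions, row_sums, where=row_sums > 0,
                                  out=np.zeros_like(transitions))
    norms = np.linalg.norm(participation, axis=1, keepdims=True)
    unit = np.divide(participation, norms, where=norms > 0,
                     out=np.zeros_like(participation))
    similarity = unit @ unit.T                    # node-node cosine similarity
    return transition_matrix, similarity

# Toy usage: three short cascades over 5 nodes; the combination of the two
# statistics into an edge score below is purely illustrative.
cascades = [[0, 1, 2], [0, 2, 3], [1, 2, 4]]
P, S = cascade_statistics(cascades, n_nodes=5)
edge_scores = P * S
print(np.round(edge_scores, 2))
```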
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:1706.00941</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.13766">arXiv:2307.13766</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.13766">pdf</a>, <a href="https://arxiv.org/format/2307.13766">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> ClusterSeq: Enhancing Sequential Recommender Systems with Clustering based Meta-Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maheri%2C+M">Mohammmadmahdi Maheri</a>, <a href="/search/cs?searchtype=author&amp;query=Abdollahzadeh%2C+R">Reza Abdollahzadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Mohammadi%2C+B">Bardia Mohammadi</a>, <a href="/search/cs?searchtype=author&amp;query=Rafiei%2C+M">Mina Rafiei</a>, <a href="/search/cs?searchtype=author&amp;query=Habibi%2C+J">Jafar Habibi</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.13766v1-abstract-short" style="display: inline;"> In practical scenarios, the effectiveness of sequential recommendation systems is hindered by the user cold-start problem, which arises due to limited interactions for accurately determining user preferences. Previous studies have attempted to address this issue by combining meta-learning with user and item-side information. However, these approaches face inherent challenges in modeling user prefe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.13766v1-abstract-full').style.display = 'inline'; document.getElementById('2307.13766v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.13766v1-abstract-full" style="display: none;"> In practical scenarios, the effectiveness of sequential recommendation systems is hindered by the user cold-start problem, which arises due to limited interactions for accurately determining user preferences. Previous studies have attempted to address this issue by combining meta-learning with user and item-side information. However, these approaches face inherent challenges in modeling user preference dynamics, particularly for &#34;minor users&#34; who exhibit distinct preferences compared to more common or &#34;major users.&#34; To overcome these limitations, we present a novel approach called ClusterSeq, a Meta-Learning Clustering-Based Sequential Recommender System. ClusterSeq leverages dynamic information in the user sequence to enhance item prediction accuracy, even in the absence of side information. 
This model preserves the preferences of minor users without being overshadowed by major users, and it capitalizes on the collective knowledge of users within the same cluster. Extensive experiments conducted on various benchmark datasets validate the effectiveness of ClusterSeq. Empirical results consistently demonstrate that ClusterSeq outperforms several state-of-the-art meta-learning recommenders. Notably, compared to existing meta-learning methods, our proposed approach achieves a substantial improvement of 16-39% in Mean Reciprocal Rank (MRR). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.13766v1-abstract-full').style.display = 'none'; document.getElementById('2307.13766v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.09173">arXiv:2303.09173</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.09173">pdf</a>, <a href="https://arxiv.org/format/2303.09173">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> Network-based Control of Epidemic via Flattening the Infection Curve: High-Clustered vs. Low-Clustered Social Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Doostmohammadian%2C+M">Mohammadreza Doostmohammadian</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.09173v1-abstract-short" style="display: inline;"> Recent studies in network science and control have shown a meaningful relationship between the epidemic processes (e.g., COVID-19 spread) and some network properties. This paper studies how such network properties, namely clustering coefficient and centrality measures (or node influence metrics), affect the spread of viruses and the growth of epidemics over scale-free networks. The results can be&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.09173v1-abstract-full').style.display = 'inline'; document.getElementById('2303.09173v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.09173v1-abstract-full" style="display: none;"> Recent studies in network science and control have shown a meaningful relationship between the epidemic processes (e.g., COVID-19 spread) and some network properties. 
This paper studies how such network properties, namely the clustering coefficient and centrality measures (or node influence metrics), affect the spread of viruses and the growth of epidemics over scale-free networks. The results can be used to target individuals (the nodes in the network) to flatten the infection curve. The aim of this so-called flattening of the infection curve is to reduce health service costs and the burden on authorities/governments. Our Monte-Carlo simulation results show that the infection curve is, in general, easier to flatten over clustered networks, i.e., with the same connectivity and the same number of isolated individuals, such networks yield flatter curves. Moreover, distance-based centrality measures, which target the nodes based on their average network distance to other nodes (and not the node degrees), are better choices for targeting individuals for isolation/vaccination. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.09173v1-abstract-full').style.display = 'none'; document.getElementById('2303.09173v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in Social Network Analysis and Mining</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.03176">arXiv:2212.03176</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2212.03176">pdf</a>, <a href="https://arxiv.org/format/2212.03176">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Domain Adaptation and Generalization on Functional Medical Images: A Systematic Survey </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sarafraz%2C+G">Gita Sarafraz</a>, <a href="/search/cs?searchtype=author&amp;query=Behnamnia%2C+A">Armin Behnamnia</a>, <a href="/search/cs?searchtype=author&amp;query=Hosseinzadeh%2C+M">Mehran Hosseinzadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Balapour%2C+A">Ali Balapour</a>, <a href="/search/cs?searchtype=author&amp;query=Meghrazi%2C+A">Amin Meghrazi</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax" id="2212.03176v1-abstract-full"> Machine learning algorithms have revolutionized different fields, including natural language processing, computer vision, signal processing, and medical data processing. 
Despite the excellent capabilities of machine learning algorithms in various tasks and areas, the performance of these models mainly deteriorates when there is a shift between the test and training data distributions. This gap occurs due to the violation of the fundamental assumption that the training and test data are independent and identically distributed (i.i.d.). In real-world scenarios where collecting data from all possible domains for training is costly or even impossible, the i.i.d. assumption can hardly be satisfied. The problem is even more severe in the case of medical images and signals because collecting data requires either expensive equipment or a meticulous experimental setup, even for a single domain. Additionally, the decrease in performance may have severe consequences in the analysis of medical records. As a result of such problems, the ability to generalize and adapt under distribution shifts (domain generalization (DG) and domain adaptation (DA)) is essential for the analysis of medical data. This paper provides the first systematic review of DG and DA on functional brain signals, filling the gap left by the absence of a comprehensive study in this area. We provide detailed explanations and categorizations of the datasets, approaches, and architectures used in DG and DA on functional brain images. We further discuss promising future directions in this field. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">41 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.09681">arXiv:2209.09681</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.09681">pdf</a>, <a href="https://arxiv.org/format/2209.09681">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1371/journal.pone.0277887">10.1371/journal.pone.0277887 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> SCGG: A Deep Structure-Conditioned Graph Generative Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Faez%2C+F">Faezeh Faez</a>, <a href="/search/cs?searchtype=author&amp;query=Dijujin%2C+N+H">Negin Hashemi Dijujin</a>, <a href="/search/cs?searchtype=author&amp;query=Baghshah%2C+M+S">Mahdieh Soleymani Baghshah</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.09681v1-abstract-short" style="display: inline;"> Deep learning-based graph generation approaches have remarkable capacities for graph data modeling, allowing them to solve a wide range of real-world problems. Making these methods able to consider different conditions during the generation procedure even increases their effectiveness by empowering them to generate new graph samples that meet the desired criteria. This paper presents a conditional&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.09681v1-abstract-full').style.display = 'inline'; document.getElementById('2209.09681v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.09681v1-abstract-full" style="display: none;"> Deep learning-based graph generation approaches have remarkable capacities for graph data modeling, allowing them to solve a wide range of real-world problems. Making these methods able to consider different conditions during the generation procedure even increases their effectiveness by empowering them to generate new graph samples that meet the desired criteria. This paper presents a conditional deep graph generation method called SCGG that considers a particular type of structural conditions. Specifically, our proposed SCGG model takes an initial subgraph and autoregressively generates new nodes and their corresponding edges on top of the given conditioning substructure. The architecture of SCGG consists of a graph representation learning network and an autoregressive generative model, which is trained end-to-end. 
Using this model, we can address graph completion, a rampant and inherently difficult problem of recovering missing nodes and their associated edges of partially observed graphs. Experimental results on both synthetic and real-world datasets demonstrate the superiority of our method compared with state-of-the-art baselines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.09681v1-abstract-full').style.display = 'none'; document.getElementById('2209.09681v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.07148">arXiv:2209.07148</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2209.07148">pdf</a>, <a href="https://arxiv.org/ps/2209.07148">ps</a>, <a href="https://arxiv.org/format/2209.07148">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Semi-supervised Batch Learning From Logged Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Aminian%2C+G">Gholamali Aminian</a>, <a href="/search/cs?searchtype=author&amp;query=Behnamnia%2C+A">Armin Behnamnia</a>, <a href="/search/cs?searchtype=author&amp;query=Vega%2C+R">Roberto Vega</a>, <a href="/search/cs?searchtype=author&amp;query=Toni%2C+L">Laura Toni</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+C">Chengchun Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a>, <a href="/search/cs?searchtype=author&amp;query=Rivasplata%2C+O">Omar Rivasplata</a>, <a href="/search/cs?searchtype=author&amp;query=Rodrigues%2C+M+R+D">Miguel R. D. Rodrigues</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.07148v3-abstract-short" style="display: inline;"> Off-policy learning methods are intended to learn a policy from logged data, which includes context, action, and feedback (cost or reward) for each sample point. In this work, we build on the counterfactual risk minimization framework, which also assumes access to propensity scores. We propose learning methods for problems where feedback is missing for some samples, so there are samples with feedb&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.07148v3-abstract-full').style.display = 'inline'; document.getElementById('2209.07148v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.07148v3-abstract-full" style="display: none;"> Off-policy learning methods are intended to learn a policy from logged data, which includes context, action, and feedback (cost or reward) for each sample point. 
In this work, we build on the counterfactual risk minimization framework, which also assumes access to propensity scores. We propose learning methods for problems where feedback is missing for some samples, so there are samples with feedback and samples missing-feedback in the logged data. We refer to this type of learning as semi-supervised batch learning from logged data, which arises in a wide range of application domains. We derive a novel upper bound for the true risk under the inverse propensity score estimator to address this kind of learning problem. Using this bound, we propose a regularized semi-supervised batch learning method with logged data where the regularization term is feedback-independent and, as a result, can be evaluated using the logged missing-feedback data. Consequently, even though feedback is only present for some samples, a learning policy can be learned by leveraging the missing-feedback samples. The results of experiments derived from benchmark datasets indicate that these algorithms achieve policies with better performance in comparison with logging policies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.07148v3-abstract-full').style.display = 'none'; document.getElementById('2209.07148v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">46 pages,</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.09914">arXiv:2202.09914</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.09914">pdf</a>, <a href="https://arxiv.org/format/2202.09914">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> SOInter: A Novel Deep Energy Based Interpretation Method for Explaining Structured Output Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Seyyedsalehi%2C+S+F">S. Fatemeh Seyyedsalehi</a>, <a href="/search/cs?searchtype=author&amp;query=Soleymani%2C+M">Mahdieh Soleymani</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.09914v1-abstract-short" style="display: inline;"> We propose a novel interpretation technique to explain the behavior of structured output models, which learn mappings between an input vector to a set of output variables simultaneously. Because of the complex relationship between the computational path of output variables in structured models, a feature can affect the value of output through other ones. 
We focus on one of the outputs as the targe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.09914v1-abstract-full').style.display = 'inline'; document.getElementById('2202.09914v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.09914v1-abstract-full" style="display: none;"> We propose a novel interpretation technique to explain the behavior of structured output models, which learn mappings between an input vector to a set of output variables simultaneously. Because of the complex relationship between the computational path of output variables in structured models, a feature can affect the value of output through other ones. We focus on one of the outputs as the target and try to find the most important features utilized by the structured model to decide on the target in each locality of the input space. In this paper, we assume an arbitrary structured output model is available as a black box and argue how considering the correlations between output variables can improve the explanation performance. The goal is to train a function as an interpreter for the target output variable over the input space. We introduce an energy-based training process for the interpreter function, which effectively considers the structural information incorporated into the model to be explained. The effectiveness of the proposed method is confirmed using a variety of simulated and real data sets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.09914v1-abstract-full').style.display = 'none'; document.getElementById('2202.09914v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.11808">arXiv:2201.11808</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.11808">pdf</a>, <a href="https://arxiv.org/format/2201.11808">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> LAP: An Attention-Based Module for Concept Based Self-Interpretation and Knowledge Injection in Convolutional Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Modegh%2C+R+G">Rassa Ghavami Modegh</a>, <a href="/search/cs?searchtype=author&amp;query=Salimi%2C+A">Ahmad Salimi</a>, <a href="/search/cs?searchtype=author&amp;query=Dizaji%2C+A">Alireza Dizaji</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.11808v5-abstract-short" style="display: inline;"> Despite the state-of-the-art performance of deep convolutional neural networks, they are susceptible to bias and malfunction in unseen situations. 
Moreover, the complex computation behind their reasoning is not human-understandable to develop trust. External explainer methods have tried to interpret network decisions in a human-understandable way, but they are accused of fallacies due to their ass&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.11808v5-abstract-full').style.display = 'inline'; document.getElementById('2201.11808v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.11808v5-abstract-full" style="display: none;"> Despite the state-of-the-art performance of deep convolutional neural networks, they are susceptible to bias and malfunction in unseen situations. Moreover, the complex computation behind their reasoning is not human-understandable to develop trust. External explainer methods have tried to interpret network decisions in a human-understandable way, but they are accused of fallacies due to their assumptions and simplifications. On the other side, the inherent self-interpretability of models, while being more robust to the mentioned fallacies, cannot be applied to the already trained models. In this work, we propose a new attention-based pooling layer, called Local Attention Pooling (LAP), that accomplishes self-interpretability and the possibility for knowledge injection without performance loss. The module is easily pluggable into any convolutional neural network, even the already trained ones. We have defined a weakly supervised training scheme to learn the distinguishing features in decision-making without depending on experts&#39; annotations. We verified our claims by evaluating several LAP-extended models on two datasets, including ImageNet. The proposed framework offers more valid human-understandable and faithful-to-the-model interpretations than the commonly used white-box explainer methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.11808v5-abstract-full').style.display = 'none'; document.getElementById('2201.11808v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T07; 68T99 (Primary) 68T45 (Secondary) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.01131">arXiv:2112.01131</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2112.01131">pdf</a>, <a href="https://arxiv.org/format/2112.01131">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> FNR: A Similarity and Transformer-Based Approach to Detect Multi-Modal Fake News in Social Media </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghorbanpour%2C+F">Faeze Ghorbanpour</a>, <a href="/search/cs?searchtype=author&amp;query=Ramezani%2C+M">Maryam Ramezani</a>, <a href="/search/cs?searchtype=author&amp;query=Fazli%2C+M+A">Mohammad A. Fazli</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.01131v1-abstract-short" style="display: inline;"> The availability and interactive nature of social media have made them the primary source of news around the globe. The popularity of social media tempts criminals to pursue their immoral intentions by producing and disseminating fake news using seductive text and misleading images. Therefore, verifying social media news and spotting fakes is crucial. This work aims to analyze multi-modal features&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.01131v1-abstract-full').style.display = 'inline'; document.getElementById('2112.01131v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.01131v1-abstract-full" style="display: none;"> The availability and interactive nature of social media have made them the primary source of news around the globe. The popularity of social media tempts criminals to pursue their immoral intentions by producing and disseminating fake news using seductive text and misleading images. Therefore, verifying social media news and spotting fakes is crucial. This work aims to analyze multi-modal features from texts and images in social media for detecting fake news. We propose a Fake News Revealer (FNR) method that utilizes transform learning to extract contextual and semantic features and contrastive loss to determine the similarity between image and text. We applied FNR on two real social media datasets. The results show the proposed method achieves higher accuracies in detecting fake news compared to the previous works. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.01131v1-abstract-full').style.display = 'none'; document.getElementById('2112.01131v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 11 figures, 4 tables and 20 references</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.03297">arXiv:2111.03297</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.03297">pdf</a>, <a href="https://arxiv.org/format/2111.03297">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TETC.2021.3102041">10.1109/TETC.2021.3102041 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> RC-RNN: Reconfigurable Cache Architecture for Storage Systems Using Recurrent Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ebrahimi%2C+S">Shahriar Ebrahimi</a>, <a href="/search/cs?searchtype=author&amp;query=Salkhordeh%2C+R">Reza Salkhordeh</a>, <a href="/search/cs?searchtype=author&amp;query=Osia%2C+S+A">Seyed Ali Osia</a>, <a href="/search/cs?searchtype=author&amp;query=Taheri%2C+A">Ali Taheri</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid Reza Rabiee</a>, <a href="/search/cs?searchtype=author&amp;query=Asadi%2C+H">Hossein Asadi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.03297v1-abstract-short" style="display: inline;"> Solid-State Drives (SSDs) have significant performance advantages over traditional Hard Disk Drives (HDDs) such as lower latency and higher throughput. Significantly higher price per capacity and limited lifetime, however, prevents designers to completely substitute HDDs by SSDs in enterprise storage systems. SSD-based caching has recently been suggested for storage systems to benefit from higher&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.03297v1-abstract-full').style.display = 'inline'; document.getElementById('2111.03297v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.03297v1-abstract-full" style="display: none;"> Solid-State Drives (SSDs) have significant performance advantages over traditional Hard Disk Drives (HDDs) such as lower latency and higher throughput. 
Their significantly higher price per capacity and limited lifetime, however, prevent designers from completely substituting HDDs with SSDs in enterprise storage systems. SSD-based caching has recently been suggested for storage systems to benefit from the higher performance of SSDs while minimizing the overall cost. While conventional caching algorithms such as Least Recently Used (LRU) provide a high hit ratio in processors, due to the highly random behavior of Input/Output (I/O) workloads, they hardly provide the required performance level for storage systems. In addition to poor performance, inefficient algorithms also shorten SSD lifetime with unnecessary cache replacements. Such shortcomings motivate us to benefit from more complex non-linear algorithms to achieve higher cache performance and extend SSD lifetime. In this paper, we propose RC-RNN, the first reconfigurable SSD-based cache architecture for storage systems that utilizes machine learning to identify performance-critical data pages for I/O caching. The proposed architecture uses Recurrent Neural Networks (RNNs) to characterize ongoing workloads and optimize itself towards higher cache performance while improving SSD lifetime. RC-RNN attempts to learn the characteristics of the running workload to predict its behavior and then uses the collected information to identify performance-critical data pages to fetch into the cache. Experimental results show that RC-RNN characterizes workloads with an accuracy of up to 94.6% for SNIA I/O workloads. RC-RNN performs similarly to the optimal cache algorithm with an accuracy of 95% on average, and outperforms previous SSD caching architectures by providing up to 7x higher hit ratios and decreasing cache replacements by up to 2x. </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Date of Publication: 09 August 2021</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Transactions on Emerging Topics in Computing (2021) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.03800">arXiv:2110.03800</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.03800">pdf</a>, <a href="https://arxiv.org/format/2110.03800">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3487553.3524721">10.1145/3487553.3524721 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> CCGG: A Deep Autoregressive Model for Class-Conditional Graph Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ommi%2C+Y">Yassaman Ommi</a>, <a href="/search/cs?searchtype=author&amp;query=Yousefabadi%2C+M">Matin Yousefabadi</a>, <a href="/search/cs?searchtype=author&amp;query=Faez%2C+F">Faezeh Faez</a>, <a href="/search/cs?searchtype=author&amp;query=Sabour%2C+A">Amirmojtaba Sabour</a>, <a href="/search/cs?searchtype=author&amp;query=Baghshah%2C+M+S">Mahdieh Soleymani Baghshah</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.03800v2-abstract-short" style="display: inline;"> Graph data structures are fundamental for studying connected entities. With an increase in the number of applications where data is represented as graphs, the problem of graph generation has recently become a hot topic. However, despite its significance, conditional graph generation that creates graphs with desired features is relatively less explored in previous studies. This paper addresses the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.03800v2-abstract-full').style.display = 'inline'; document.getElementById('2110.03800v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.03800v2-abstract-full" style="display: none;"> Graph data structures are fundamental for studying connected entities. With an increase in the number of applications where data is represented as graphs, the problem of graph generation has recently become a hot topic. However, despite its significance, conditional graph generation that creates graphs with desired features is relatively less explored in previous studies. This paper addresses the problem of class-conditional graph generation that uses class labels as generation constraints by introducing the Class Conditioned Graph Generator (CCGG). 
We built CCGG by injecting the class information as an additional input into a graph generator model and including a classification loss in its total loss along with a gradient passing trick. Our experiments show that CCGG outperforms existing conditional graph generation methods on various datasets. It also manages to maintain the quality of the generated graphs in terms of distribution-based evaluation metrics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.03800v2-abstract-full').style.display = 'none'; document.getElementById('2110.03800v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.09329">arXiv:2109.09329</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.09329">pdf</a>, <a href="https://arxiv.org/format/2109.09329">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Distributed Detection and Mitigation of Biasing Attacks over Multi-Agent Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Doostmohammadian%2C+M">Mohammadreza Doostmohammadian</a>, <a href="/search/cs?searchtype=author&amp;query=Zarrabi%2C+H">Houman Zarrabi</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a>, <a href="/search/cs?searchtype=author&amp;query=Khan%2C+U+A">Usman A. Khan</a>, <a href="/search/cs?searchtype=author&amp;query=Charalambous%2C+T">Themistoklis Charalambous</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.09329v1-abstract-short" style="display: inline;"> This paper proposes a distributed attack detection and mitigation technique based on distributed estimation over a multi-agent network, where the agents take partial system measurements susceptible to (possible) biasing attacks. In particular, we assume that the system is not locally observable via the measurements in the direct neighborhood of any agent. First, for performance analysis in the att&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.09329v1-abstract-full').style.display = 'inline'; document.getElementById('2109.09329v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.09329v1-abstract-full" style="display: none;"> This paper proposes a distributed attack detection and mitigation technique based on distributed estimation over a multi-agent network, where the agents take partial system measurements susceptible to (possible) biasing attacks. 
In particular, we assume that the system is not locally observable via the measurements in the direct neighborhood of any agent. First, for performance analysis in the attack-free case, we show that the proposed distributed estimation is unbiased with bounded mean-square deviation in steady-state. Then, we propose a residual-based strategy to locally detect possible attacks at agents. In contrast to the deterministic thresholds in the literature assuming an upper bound on the noise support, we define the thresholds on the residuals in a probabilistic sense. After detecting and isolating the attacked agent, a system-digraph-based mitigation strategy is proposed to replace the attacked measurement with a new observationally-equivalent one to recover potential observability loss. We adopt a graph-theoretic method to classify the agents based on their measurements, to distinguish between the agents recovering the system rank-deficiency and the ones recovering output-connectivity of the system digraph. The attack detection/mitigation strategy is specifically described for each type, which is of polynomial-order complexity for large-scale applications. Illustrative simulations support our theoretical results. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.09329v1-abstract-full').style.display = 'none'; document.getElementById('2109.09329v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted TNSE</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.10641">arXiv:2105.10641</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2105.10641">pdf</a>, <a href="https://arxiv.org/format/2105.10641">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> </div> </div> <p class="title is-5 mathjax"> Analysis of Contractions in System Graphs: Application to State Estimation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Doostmohammadian%2C+M">Mohammadreza Doostmohammadian</a>, <a href="/search/cs?searchtype=author&amp;query=Charalambous%2C+T">Themistoklis Charalambous</a>, <a href="/search/cs?searchtype=author&amp;query=Shafie-khah%2C+M">Miadreza Shafie-khah</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a>, <a href="/search/cs?searchtype=author&amp;query=Khan%2C+U+A">Usman A. 
Khan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.10641v1-abstract-short" style="display: inline;"> Observability and estimation are closely tied to the system structure, which can be visualized as a system graph--a graph that captures the inter-dependencies within the state variables. For example, in social system graphs such inter-dependencies represent the social interactions of different individuals. It was recently shown that contractions, a key concept from graph theory, in the system grap&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.10641v1-abstract-full').style.display = 'inline'; document.getElementById('2105.10641v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.10641v1-abstract-full" style="display: none;"> Observability and estimation are closely tied to the system structure, which can be visualized as a system graph--a graph that captures the inter-dependencies within the state variables. For example, in social system graphs such inter-dependencies represent the social interactions of different individuals. It was recently shown that contractions, a key concept from graph theory, in the system graph are critical to system observability, as (at least) one state measurement in every contraction is necessary for observability. Thus, the size and number of contractions are critical in recovering from loss of observability. In this paper, the correlation between the average-size/number of contractions and the global clustering coefficient (GCC) of the system graph is studied. Our empirical results show that estimating systems with high GCC requires fewer measurements, and in case of measurement failure, there are fewer possible options to find a substitute measurement that recovers the system&#39;s observability. This is significant as by tuning the GCC, we can improve the observability properties of large-scale engineered networks, such as social networks and smart grid. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.10641v1-abstract-full').style.display = 'none'; document.getElementById('2105.10641v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021.
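<p class="is-size-7">A minimal sketch of the kind of empirical computation these structural-observability papers build on: the global clustering coefficient (transitivity) of a system graph and the unmatched nodes of its bipartite representation, using networkx. The graph model, its size, and the unmatched-node proxy are illustrative assumptions, not the paper's code; enumerating contractions themselves needs a fuller graph-theoretic analysis that is omitted here.</p>
<pre><code>
import networkx as nx
from networkx.algorithms import bipartite

def unmatched_nodes(digraph):
    # Bipartite representation used in structural observability/controllability:
    # an edge (u, v) of the system digraph links the out-copy of u to the in-copy of v.
    B = nx.Graph()
    out_side = [("out", u) for u in digraph.nodes]
    in_side = [("in", v) for v in digraph.nodes]
    B.add_nodes_from(out_side, bipartite=0)
    B.add_nodes_from(in_side, bipartite=1)
    B.add_edges_from((("out", u), ("in", v)) for u, v in digraph.edges)
    matching = bipartite.maximum_matching(B, top_nodes=out_side)
    # state nodes whose in-copy stays unmatched need a dedicated measurement/driver
    return [v for v in digraph.nodes if ("in", v) not in matching]

G = nx.gnp_random_graph(50, 0.08, directed=True, seed=0)
gcc = nx.transitivity(G.to_undirected())   # global clustering coefficient of the system graph
print(f"GCC = {gcc:.3f}, unmatched nodes = {len(unmatched_nodes(G))}")
</code></pre>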
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.07613">arXiv:2104.07613</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2104.07613">pdf</a>, <a href="https://arxiv.org/format/2104.07613">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> SINA-BERT: A pre-trained Language Model for Analysis of Medical Texts in Persian </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Taghizadeh%2C+N">Nasrin Taghizadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Doostmohammadi%2C+E">Ehsan Doostmohammadi</a>, <a href="/search/cs?searchtype=author&amp;query=Seifossadat%2C+E">Elham Seifossadat</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a>, <a href="/search/cs?searchtype=author&amp;query=Tahaei%2C+M+S">Maedeh S. Tahaei</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2104.07613v1-abstract-short" style="display: inline;"> We have released Sina-BERT, a language model pre-trained on BERT (Devlin et al., 2018) to address the lack of a high-quality Persian language model in the medical domain. SINA-BERT utilizes pre-training on a large-scale corpus of medical contents including formal and informal texts collected from a variety of online resources in order to improve the performance on health-care related tasks. We emp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.07613v1-abstract-full').style.display = 'inline'; document.getElementById('2104.07613v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2104.07613v1-abstract-full" style="display: none;"> We have released Sina-BERT, a language model pre-trained on BERT (Devlin et al., 2018) to address the lack of a high-quality Persian language model in the medical domain. SINA-BERT utilizes pre-training on a large-scale corpus of medical contents including formal and informal texts collected from a variety of online resources in order to improve the performance on health-care related tasks. We employ SINA-BERT to complete following representative tasks: categorization of medical questions, medical sentiment analysis, and medical question retrieval. For each task, we have developed Persian annotated data sets for training and evaluation and learnt a representation for the data of each task especially complex and long medical questions. With the same architecture being used across tasks, SINA-BERT outperforms BERT-based models that were previously made available in the Persian language. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.07613v1-abstract-full').style.display = 'none'; document.getElementById('2104.07613v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. 
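<p class="is-size-7">For context, fine-tuning a BERT-style checkpoint on one of the tasks named above (medical question categorization) follows the standard Hugging Face pattern sketched below; the checkpoint path, label count, and example inputs are placeholders, since no public model id is given here.</p>
<pre><code>
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "path/to/sina-bert"   # placeholder; substitute the actual pre-trained weights
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=7)  # 7 is illustrative

questions = ["...a Persian medical question...", "...another question..."]
labels = torch.tensor([2, 5])
batch = tokenizer(questions, padding=True, truncation=True, return_tensors="pt")

out = model(**batch, labels=labels)   # cross-entropy loss from the categorization head
out.loss.backward()                   # one fine-tuning step (optimizer and training loop omitted)
</code></pre>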
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.03597">arXiv:2104.03597</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2104.03597">pdf</a>, <a href="https://arxiv.org/format/2104.03597">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-87240-3_68">10.1007/978-3-030-87240-3_68 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> GKD: Semi-supervised Graph Knowledge Distillation for Graph-Independent Inference </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghorbani%2C+M">Mahsa Ghorbani</a>, <a href="/search/cs?searchtype=author&amp;query=Bahrami%2C+M">Mojtaba Bahrami</a>, <a href="/search/cs?searchtype=author&amp;query=Kazi%2C+A">Anees Kazi</a>, <a href="/search/cs?searchtype=author&amp;query=SoleymaniBaghshah%2C+M">Mahdieh SoleymaniBaghshah</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a>, <a href="/search/cs?searchtype=author&amp;query=Navab%2C+N">Nassir Navab</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2104.03597v1-abstract-short" style="display: inline;"> The increased amount of multi-modal medical data has opened the opportunities to simultaneously process various modalities such as imaging and non-imaging data to gain a comprehensive insight into the disease prediction domain. Recent studies using Graph Convolutional Networks (GCNs) provide novel semi-supervised approaches for integrating heterogeneous modalities while investigating the patients&#39;&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.03597v1-abstract-full').style.display = 'inline'; document.getElementById('2104.03597v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2104.03597v1-abstract-full" style="display: none;"> The increased amount of multi-modal medical data has opened the opportunities to simultaneously process various modalities such as imaging and non-imaging data to gain a comprehensive insight into the disease prediction domain. Recent studies using Graph Convolutional Networks (GCNs) provide novel semi-supervised approaches for integrating heterogeneous modalities while investigating the patients&#39; associations for disease prediction. However, when the meta-data used for graph construction is not available at inference time (e.g., coming from a distinct population), the conventional methods exhibit poor performance. To address this issue, we propose a novel semi-supervised approach named GKD based on knowledge distillation. We train a teacher component that employs the label-propagation algorithm besides a deep neural network to benefit from the graph and non-graph modalities only in the training phase. 
The teacher component embeds all the available information into the soft pseudo-labels. The soft pseudo-labels are then used to train a deep student network for disease prediction of unseen test data for which the graph modality is unavailable. We perform our experiments on two public datasets for diagnosing Autism spectrum disorder, and Alzheimer&#39;s disease, along with a thorough analysis on synthetic multi-modal datasets. According to these experiments, GKD outperforms the previous graph-based deep learning methods in terms of accuracy, AUC, and Macro F1. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.03597v1-abstract-full').style.display = 'none'; document.getElementById('2104.03597v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.10056">arXiv:2103.10056</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.10056">pdf</a>, <a href="https://arxiv.org/format/2103.10056">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Dementia Severity Classification under Small Sample Size and Weak Supervision in Thick Slice MRI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shirkavand%2C+R">Reza Shirkavand</a>, <a href="/search/cs?searchtype=author&amp;query=Ayromlou%2C+S">Sana Ayromlou</a>, <a href="/search/cs?searchtype=author&amp;query=Farghadani%2C+S">Soroush Farghadani</a>, <a href="/search/cs?searchtype=author&amp;query=Tahaei%2C+M">Maedeh-sadat Tahaei</a>, <a href="/search/cs?searchtype=author&amp;query=Pourakpour%2C+F">Fattane Pourakpour</a>, <a href="/search/cs?searchtype=author&amp;query=Siahlou%2C+B">Bahareh Siahlou</a>, <a href="/search/cs?searchtype=author&amp;query=Khodakarami%2C+Z">Zeynab Khodakarami</a>, <a href="/search/cs?searchtype=author&amp;query=Rohban%2C+M+H">Mohammad H. Rohban</a>, <a href="/search/cs?searchtype=author&amp;query=Fatehi%2C+M">Mansoor Fatehi</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.10056v1-abstract-short" style="display: inline;"> Early detection of dementia through specific biomarkers in MR images plays a critical role in developing support strategies proactively. Fazekas scale facilitates an accurate quantitative assessment of the severity of white matter lesions and hence the disease. Imaging Biomarkers of dementia are multiple and comprehensive documentation of them is time-consuming. 
Therefore, any effort to automatica&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.10056v1-abstract-full').style.display = 'inline'; document.getElementById('2103.10056v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.10056v1-abstract-full" style="display: none;"> Early detection of dementia through specific biomarkers in MR images plays a critical role in developing support strategies proactively. Fazekas scale facilitates an accurate quantitative assessment of the severity of white matter lesions and hence the disease. Imaging Biomarkers of dementia are multiple and comprehensive documentation of them is time-consuming. Therefore, any effort to automatically extract these biomarkers will be of clinical value while reducing inter-rater discrepancies. To tackle this problem, we propose to classify the disease severity based on the Fazekas scale through the visual biomarkers, namely the Periventricular White Matter (PVWM) and the Deep White Matter (DWM) changes, in the real-world setting of thick-slice MRI. Small training sample size and weak supervision in form of assigning severity labels to the whole MRI stack are among the main challenges. To combat the mentioned issues, we have developed a deep learning pipeline that employs self-supervised representation learning, multiple instance learning, and appropriate pre-processing steps. We use pretext tasks such as non-linear transformation, local shuffling, in- and out-painting for self-supervised learning of useful features in this domain. Furthermore, an attention model is used to determine the relevance of each MRI slice for predicting the Fazekas scale in an unsupervised manner. We show the significant superiority of our method in distinguishing different classes of dementia compared to state-of-the-art methods in our mentioned setting, which improves the macro averaged F1-score of state-of-the-art from 61% to 76% in PVWM, and from 58% to 69.2% in DWM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.10056v1-abstract-full').style.display = 'none'; document.getElementById('2103.10056v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. 
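<p class="is-size-7">The slice-relevance idea described in this abstract can be illustrated with a small attention-based multiple-instance-learning head in PyTorch: per-slice embeddings (e.g., from a self-supervised encoder) are pooled with learned attention weights into one stack-level prediction. The feature dimension, hidden size, and class count below are assumptions for illustration, not the paper's configuration.</p>
<pre><code>
import torch
import torch.nn as nn

class SliceAttentionMIL(nn.Module):
    """Attention pooling over per-slice embeddings of one MRI stack (weak, stack-level labels)."""
    def __init__(self, feat_dim=512, hidden=128, n_classes=3):
        super().__init__()
        self.attn = nn.Sequential(nn.Linear(feat_dim, hidden), nn.Tanh(), nn.Linear(hidden, 1))
        self.head = nn.Linear(feat_dim, n_classes)

    def forward(self, slice_feats):                        # (num_slices, feat_dim)
        w = torch.softmax(self.attn(slice_feats), dim=0)   # relevance of each slice
        stack_repr = (w * slice_feats).sum(dim=0)          # weighted sum -> stack embedding
        return self.head(stack_repr), w.squeeze(-1)

feats = torch.randn(24, 512)   # e.g., embeddings of 24 thick slices from a pre-trained encoder
logits, slice_weights = SliceAttentionMIL()(feats)
</code></pre>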
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 5 figues</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.00221">arXiv:2103.00221</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.00221">pdf</a>, <a href="https://arxiv.org/format/2103.00221">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.media.2021.102272">10.1016/j.media.2021.102272 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> RA-GCN: Graph Convolutional Network for Disease Prediction Problems with Imbalanced Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghorbani%2C+M">Mahsa Ghorbani</a>, <a href="/search/cs?searchtype=author&amp;query=Kazi%2C+A">Anees Kazi</a>, <a href="/search/cs?searchtype=author&amp;query=Baghshah%2C+M+S">Mahdieh Soleymani Baghshah</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a>, <a href="/search/cs?searchtype=author&amp;query=Navab%2C+N">Nassir Navab</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.00221v3-abstract-short" style="display: inline;"> Disease prediction is a well-known classification problem in medical applications. GCNs provide a powerful tool for analyzing the patients&#39; features relative to each other. This can be achieved by modeling the problem as a graph node classification task, where each node is a patient. Due to the nature of such medical datasets, class imbalance is a prevalent issue in the field of disease prediction&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.00221v3-abstract-full').style.display = 'inline'; document.getElementById('2103.00221v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.00221v3-abstract-full" style="display: none;"> Disease prediction is a well-known classification problem in medical applications. GCNs provide a powerful tool for analyzing the patients&#39; features relative to each other. This can be achieved by modeling the problem as a graph node classification task, where each node is a patient. Due to the nature of such medical datasets, class imbalance is a prevalent issue in the field of disease prediction, where the distribution of classes is skewed. When the class imbalance is present in the data, the existing graph-based classifiers tend to be biased towards the major class(es) and neglect the samples in the minor class(es). On the other hand, the correct diagnosis of the rare positive cases among all the patients is vital in a healthcare system. 
In conventional methods, such imbalance is tackled by assigning appropriate weights to classes in the loss function which is still dependent on the relative values of weights, sensitive to outliers, and in some cases biased towards the minor class(es). In this paper, we propose a Re-weighted Adversarial Graph Convolutional Network (RA-GCN) to prevent the graph-based classifier from emphasizing the samples of any particular class. This is accomplished by associating a graph-based neural network to each class, which is responsible for weighting the class samples and changing the importance of each sample for the classifier. Therefore, the classifier adjusts itself and determines the boundary between classes with more attention to the important samples. The parameters of the classifier and weighting networks are trained by an adversarial approach. We show experiments on synthetic and three publicly available medical datasets. RA-GCN demonstrates the superiority compared to recent methods in identifying the patient&#39;s status on all three datasets. The detailed analysis is provided as quantitative and qualitative experiments on synthetic datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.00221v3-abstract-full').style.display = 'none'; document.getElementById('2103.00221v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.15544">arXiv:2012.15544</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.15544">pdf</a>, <a href="https://arxiv.org/format/2012.15544">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Deep Graph Generators: A Survey </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Faez%2C+F">Faezeh Faez</a>, <a href="/search/cs?searchtype=author&amp;query=Ommi%2C+Y">Yassaman Ommi</a>, <a href="/search/cs?searchtype=author&amp;query=Baghshah%2C+M+S">Mahdieh Soleymani Baghshah</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.15544v1-abstract-short" style="display: inline;"> Deep generative models have achieved great success in areas such as image, speech, and natural language processing in the past few years. Thanks to the advances in graph-based deep learning, and in particular graph representation learning, deep graph generation methods have recently emerged with new applications ranging from discovering novel molecular structures to modeling social networks. 
This&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.15544v1-abstract-full').style.display = 'inline'; document.getElementById('2012.15544v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.15544v1-abstract-full" style="display: none;"> Deep generative models have achieved great success in areas such as image, speech, and natural language processing in the past few years. Thanks to the advances in graph-based deep learning, and in particular graph representation learning, deep graph generation methods have recently emerged with new applications ranging from discovering novel molecular structures to modeling social networks. This paper conducts a comprehensive survey on deep learning-based graph generation approaches and classifies them into five broad categories, namely, autoregressive, autoencoder-based, RL-based, adversarial, and flow-based graph generators, providing the readers a detailed description of the methods in each class. We also present publicly available source codes, commonly used datasets, and the most widely utilized evaluation metrics. Finally, we highlight the existing challenges and discuss future research directions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.15544v1-abstract-full').style.display = 'none'; document.getElementById('2012.15544v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.06198">arXiv:2012.06198</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.06198">pdf</a>, <a href="https://arxiv.org/format/2012.06198">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> On the Observability and Controllability of Large-Scale IoT Networks: Reducing Number of Unmatched Nodes via Link Addition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Doostmohammadian%2C+M">Mohammadreza Doostmohammadian</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.06198v1-abstract-short" style="display: inline;"> In this paper, we study large-scale networks in terms of observability and controllability. In particular, we compare the number of unmatched nodes in two main types of Scale-Free (SF) networks: the Barabási-Albert (BA) model and the Holme-Kim (HK) model.
Comparing the two models based on theory and simulation, we discuss the possible relation between clustering coefficient and the number of unm&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.06198v1-abstract-full').style.display = 'inline'; document.getElementById('2012.06198v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.06198v1-abstract-full" style="display: none;"> In this paper, we study large-scale networks in terms of observability and controllability. In particular, we compare the number of unmatched nodes in two main types of Scale-Free (SF) networks: the Barabási-Albert (BA) model and the Holme-Kim (HK) model. Comparing the two models based on theory and simulation, we discuss the possible relation between clustering coefficient and the number of unmatched nodes. In this direction, we propose a new algorithm to reduce the number of unmatched nodes via link addition. The results are significant as one can reduce the number of unmatched nodes and therefore the number of embedded sensors/actuators in, for example, an IoT network. This may significantly reduce the cost of controlling devices or monitoring cost in large-scale systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.06198v1-abstract-full').style.display = 'none'; document.getElementById('2012.06198v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.11736">arXiv:2011.11736</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2011.11736">pdf</a>, <a href="https://arxiv.org/format/2011.11736">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Accurate and Rapid Diagnosis of COVID-19 Pneumonia with Batch Effect Removal of Chest CT-Scans and Interpretable Artificial Intelligence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Modegh%2C+R+G">Rassa Ghavami Modegh</a>, <a href="/search/cs?searchtype=author&amp;query=Hamidi%2C+M">Mehrab Hamidi</a>, <a href="/search/cs?searchtype=author&amp;query=Masoudian%2C+S">Saeed Masoudian</a>, <a href="/search/cs?searchtype=author&amp;query=Mohseni%2C+A">Amir Mohseni</a>, <a href="/search/cs?searchtype=author&amp;query=Lotfalinezhad%2C+H">Hamzeh Lotfalinezhad</a>, <a href="/search/cs?searchtype=author&amp;query=Kazemi%2C+M+A">Mohammad Ali Kazemi</a>, <a href="/search/cs?searchtype=author&amp;query=Moradi%2C+B">Behnaz Moradi</a>, <a href="/search/cs?searchtype=author&amp;query=Ghafoori%2C+M">Mahyar Ghafoori</a>, <a href="/search/cs?searchtype=author&amp;query=Motamedi%2C+O">Omid Motamedi</a>, <a href="/search/cs?searchtype=author&amp;query=Pournik%2C+O">Omid Pournik</a>, <a
href="/search/cs?searchtype=author&amp;query=Rezaei-Kalantari%2C+K">Kiara Rezaei-Kalantari</a>, <a href="/search/cs?searchtype=author&amp;query=Manteghinezhad%2C+A">Amirreza Manteghinezhad</a>, <a href="/search/cs?searchtype=author&amp;query=Javanmard%2C+S+H">Shaghayegh Haghjooy Javanmard</a>, <a href="/search/cs?searchtype=author&amp;query=Nezhad%2C+F+A">Fateme Abdoli Nezhad</a>, <a href="/search/cs?searchtype=author&amp;query=Enhesari%2C+A">Ahmad Enhesari</a>, <a href="/search/cs?searchtype=author&amp;query=Kheyrkhah%2C+M+S">Mohammad Saeed Kheyrkhah</a>, <a href="/search/cs?searchtype=author&amp;query=Eghtesadi%2C+R">Razieh Eghtesadi</a>, <a href="/search/cs?searchtype=author&amp;query=Azadbakht%2C+J">Javid Azadbakht</a>, <a href="/search/cs?searchtype=author&amp;query=Aliasgharzadeh%2C+A">Akbar Aliasgharzadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Sharif%2C+M+R">Mohammad Reza Sharif</a>, <a href="/search/cs?searchtype=author&amp;query=Khaleghi%2C+A">Ali Khaleghi</a>, <a href="/search/cs?searchtype=author&amp;query=Foroutan%2C+A">Abbas Foroutan</a>, <a href="/search/cs?searchtype=author&amp;query=Ghanaati%2C+H">Hossein Ghanaati</a>, <a href="/search/cs?searchtype=author&amp;query=Dashti%2C+H">Hamed Dashti</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.11736v2-abstract-short" style="display: inline;"> COVID-19 is a virus with high transmission rate that demands rapid identification of the infected patients to reduce the spread of the disease. The current gold-standard test, Reverse-Transcription Polymerase Chain Reaction (RT-PCR), has a high rate of false negatives. Diagnosing from CT-scan images as a more accurate alternative has the challenge of distinguishing COVID-19 from other pneumonia di&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.11736v2-abstract-full').style.display = 'inline'; document.getElementById('2011.11736v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.11736v2-abstract-full" style="display: none;"> COVID-19 is a virus with high transmission rate that demands rapid identification of the infected patients to reduce the spread of the disease. The current gold-standard test, Reverse-Transcription Polymerase Chain Reaction (RT-PCR), has a high rate of false negatives. Diagnosing from CT-scan images as a more accurate alternative has the challenge of distinguishing COVID-19 from other pneumonia diseases. Artificial intelligence can help radiologists and physicians to accelerate the process of diagnosis, increase its accuracy, and measure the severity of the disease. We designed a new interpretable deep neural network to distinguish healthy people, patients with COVID-19, and patients with other pneumonia diseases from axial lung CT-scan images. Our model also detects the infected areas and calculates the percentage of the infected lung volume. We first preprocessed the images to eliminate the batch effects of different devices, and then adopted a weakly supervised method to train the model without having any tags for the infected parts. We trained and evaluated the model on a large dataset of 3359 samples from 6 different medical centers. 
The model reached sensitivities of 97.75% and 98.15%, and specificities of 87% and 81.03% in separating healthy people from the diseased and COVID-19 from other diseases, respectively. It also demonstrated similar performance for 1435 samples from 6 different medical centers, which proves its generalizability. The performance of the model on a large diverse dataset, its generalizability, and its interpretability make it suitable to be used as a reliable diagnostic system. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.11736v2-abstract-full').style.display = 'none'; document.getElementById('2011.11736v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 pages, 4 figures. Some minor changes have been applied to the text, some formulae are added to make the descriptions clearer, and two names are corrected (the full versions of the names are included)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.11108">arXiv:2011.11108</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2011.11108">pdf</a>, <a href="https://arxiv.org/format/2011.11108">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Multiresolution Knowledge Distillation for Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Salehi%2C+M">Mohammadreza Salehi</a>, <a href="/search/cs?searchtype=author&amp;query=Sadjadi%2C+N">Niousha Sadjadi</a>, <a href="/search/cs?searchtype=author&amp;query=Baselizadeh%2C+S">Soroosh Baselizadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Rohban%2C+M+H">Mohammad Hossein Rohban</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.11108v1-abstract-short" style="display: inline;"> Unsupervised representation learning has proved to be a critical component of anomaly detection/localization in images. The challenges to learn such a representation are two-fold. Firstly, the sample size is not often large enough to learn a rich generalizable representation through conventional techniques.
Secondly, while only normal samples are available at training, the learned features should&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.11108v1-abstract-full').style.display = 'inline'; document.getElementById('2011.11108v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.11108v1-abstract-full" style="display: none;"> Unsupervised representation learning has proved to be a critical component of anomaly detection/localization in images. The challenges to learn such a representation are two-fold. Firstly, the sample size is not often large enough to learn a rich generalizable representation through conventional techniques. Secondly, while only normal samples are available at training, the learned features should be discriminative of normal and anomalous samples. Here, we propose to use the &#34;distillation&#34; of features at various layers of an expert network, pre-trained on ImageNet, into a simpler cloner network to tackle both issues. We detect and localize anomalies using the discrepancy between the expert and cloner networks&#39; intermediate activation values given the input data. We show that considering multiple intermediate hints in distillation leads to better exploiting the expert&#39;s knowledge and more distinctive discrepancy compared to solely utilizing the last layer activation values. Notably, previous methods either fail in precise anomaly localization or need expensive region-based training. In contrast, with no need for any special or intensive training procedure, we incorporate interpretability algorithms in our novel framework for the localization of anomalous regions. Despite the striking contrast between some test datasets and ImageNet, we achieve competitive or significantly superior results compared to the SOTA methods on MNIST, F-MNIST, CIFAR-10, MVTecAD, Retinal-OCT, and two Medical datasets on both anomaly detection and localization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.11108v1-abstract-full').style.display = 'none'; document.getElementById('2011.11108v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. 
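<p class="is-size-7">A rough sketch of the multi-layer discrepancy idea in this abstract: compare intermediate ("hint") activations of an ImageNet-pre-trained expert with those of a cloner network and accumulate the per-layer distance into an anomaly score. The VGG backbone, layer indices, and cosine distance are illustrative choices, and the cloner is faked here by re-running the expert on a perturbed input just to exercise the scoring code.</p>
<pre><code>
import torch
import torch.nn.functional as F
from torchvision.models import vgg16

expert = vgg16(weights=None).features.eval()   # load ImageNet weights in practice
hint_layers = [3, 8, 15, 22]                   # a few intermediate depths ("hints")

def activations(net, x, layers):
    outs, h = [], x
    for idx, layer in enumerate(net):
        h = layer(h)
        if idx in layers:
            outs.append(h)
    return outs

def anomaly_score(expert_acts, cloner_acts):
    # accumulate per-layer cosine distances between expert and cloner activations
    return sum(1 - F.cosine_similarity(a.flatten(1), b.flatten(1), dim=1)
               for a, b in zip(expert_acts, cloner_acts))

x = torch.randn(1, 3, 224, 224)
cloner_input = x + 0.1 * torch.randn_like(x)   # stand-in for a separately trained cloner network
score = anomaly_score(activations(expert, x, hint_layers),
                      activations(expert, cloner_input, hint_layers))
print(score)
</code></pre>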
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.01400">arXiv:2010.01400</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2010.01400">pdf</a>, <a href="https://arxiv.org/format/2010.01400">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3599237">10.1145/3599237 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Joint Inference of Diffusion and Structure in Partially Observed Social Networks Using Coupled Matrix Factorization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ramezani%2C+M">Maryam Ramezani</a>, <a href="/search/cs?searchtype=author&amp;query=Ahadinia%2C+A">Aryan Ahadinia</a>, <a href="/search/cs?searchtype=author&amp;query=Ziaei%2C+A">Amirmohammad Ziaei</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.01400v2-abstract-short" style="display: inline;"> Access to complete data in large-scale networks is often infeasible. Therefore, the problem of missing data is a crucial and unavoidable issue in the analysis and modeling of real-world social networks. However, most of the research on different aspects of social networks does not consider this limitation. One effective way to solve this problem is to recover the missing data as a pre-processing s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.01400v2-abstract-full').style.display = 'inline'; document.getElementById('2010.01400v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.01400v2-abstract-full" style="display: none;"> Access to complete data in large-scale networks is often infeasible. Therefore, the problem of missing data is a crucial and unavoidable issue in the analysis and modeling of real-world social networks. However, most of the research on different aspects of social networks does not consider this limitation. One effective way to solve this problem is to recover the missing data as a pre-processing step. In this paper, a model is learned from partially observed data to infer unobserved diffusion and structure networks. To jointly discover omitted diffusion activities and hidden network structures, we develop a probabilistic generative model called &#34;DiffStru.&#34; The interrelations among links of nodes and cascade processes are utilized in the proposed method via learning coupled with low-dimensional latent factors. Besides inferring unseen data, latent factors such as community detection may also aid in network classification problems. We tested different missing data scenarios on simulated independent cascades over LFR networks and real datasets, including Twitter and Memtracker. 
Experiments on these synthetic and real-world datasets show that the proposed method successfully detects invisible social behaviors, predicts links, and identifies latent features. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.01400v2-abstract-full').style.display = 'none'; document.getElementById('2010.01400v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.12959">arXiv:2008.12959</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2008.12959">pdf</a>, <a href="https://arxiv.org/format/2008.12959">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Puzzle-AE: Novelty Detection in Images through Solving Puzzles </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Salehi%2C+M">Mohammadreza Salehi</a>, <a href="/search/cs?searchtype=author&amp;query=Eftekhar%2C+A">Ainaz Eftekhar</a>, <a href="/search/cs?searchtype=author&amp;query=Sadjadi%2C+N">Niousha Sadjadi</a>, <a href="/search/cs?searchtype=author&amp;query=Rohban%2C+M+H">Mohammad Hossein Rohban</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.12959v5-abstract-short" style="display: inline;"> Autoencoder, as an essential part of many anomaly detection methods, is lacking flexibility on normal data in complex datasets. U-Net is proved to be effective for this purpose but overfits on the training data if trained by just using reconstruction error similar to other AE-based frameworks. Puzzle-solving, as a pretext task of self-supervised learning (SSL) methods, has earlier proved its abili&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.12959v5-abstract-full').style.display = 'inline'; document.getElementById('2008.12959v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.12959v5-abstract-full" style="display: none;"> Autoencoder, as an essential part of many anomaly detection methods, is lacking flexibility on normal data in complex datasets. U-Net is proved to be effective for this purpose but overfits on the training data if trained by just using reconstruction error similar to other AE-based frameworks. Puzzle-solving, as a pretext task of self-supervised learning (SSL) methods, has earlier proved its ability in learning semantically meaningful features. We show that training U-Nets based on this task is an effective remedy that prevents overfitting and facilitates learning beyond pixel-level features. Shortcut solutions, however, are a big challenge in SSL tasks, including jigsaw puzzles. 
We propose adversarial robust training as an effective automatic shortcut removal. We achieve competitive or superior results compared to the State of the Art (SOTA) anomaly detection methods on various toy and real-world datasets. Unlike many competitors, the proposed framework is stable, fast, data-efficient, and does not require unprincipled early stopping. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.12959v5-abstract-full').style.display = 'none'; document.getElementById('2008.12959v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The paper is under consideration at Computer Vision and Image Understanding</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2003.05669">arXiv:2003.05669</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2003.05669">pdf</a>, <a href="https://arxiv.org/format/2003.05669">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> ARAE: Adversarially Robust Training of Autoencoders Improves Novelty Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Salehi%2C+M">Mohammadreza Salehi</a>, <a href="/search/cs?searchtype=author&amp;query=Arya%2C+A">Atrin Arya</a>, <a href="/search/cs?searchtype=author&amp;query=Pajoum%2C+B">Barbod Pajoum</a>, <a href="/search/cs?searchtype=author&amp;query=Otoofi%2C+M">Mohammad Otoofi</a>, <a href="/search/cs?searchtype=author&amp;query=Shaeiri%2C+A">Amirreza Shaeiri</a>, <a href="/search/cs?searchtype=author&amp;query=Rohban%2C+M+H">Mohammad Hossein Rohban</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2003.05669v2-abstract-short" style="display: inline;"> Autoencoders (AE) have recently been widely employed to approach the novelty detection problem. Trained only on the normal data, the AE is expected to reconstruct the normal data effectively while fail to regenerate the anomalous data, which could be utilized for novelty detection. However, in this paper, it is demonstrated that this does not always hold. AE often generalizes so perfectly that it&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.05669v2-abstract-full').style.display = 'inline'; document.getElementById('2003.05669v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2003.05669v2-abstract-full" style="display: none;"> Autoencoders (AE) have recently been widely employed to approach the novelty detection problem. 
Trained only on the normal data, the AE is expected to reconstruct the normal data effectively while fail to regenerate the anomalous data, which could be utilized for novelty detection. However, in this paper, it is demonstrated that this does not always hold. AE often generalizes so perfectly that it can also reconstruct the anomalous data well. To address this problem, we propose a novel AE that can learn more semantically meaningful features. Specifically, we exploit the fact that adversarial robustness promotes learning of meaningful features. Therefore, we force the AE to learn such features by penalizing networks with a bottleneck layer that is unstable against adversarial perturbations. We show that despite using a much simpler architecture in comparison to the prior methods, the proposed AE outperforms or is competitive to state-of-the-art on three benchmark datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2003.05669v2-abstract-full').style.display = 'none'; document.getElementById('2003.05669v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.06868">arXiv:1909.06868</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1909.06868">pdf</a>, <a href="https://arxiv.org/format/1909.06868">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> ChOracle: A Unified Statistical Framework for Churn Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Khodadadi%2C+A">Ali Khodadadi</a>, <a href="/search/cs?searchtype=author&amp;query=Hosseini%2C+S+A">Seyed Abbas Hosseini</a>, <a href="/search/cs?searchtype=author&amp;query=Pajouheshgar%2C+E">Ehsan Pajouheshgar</a>, <a href="/search/cs?searchtype=author&amp;query=Mansouri%2C+F">Farnam Mansouri</a>, <a href="/search/cs?searchtype=author&amp;query=Rabiee%2C+H+R">Hamid R. Rabiee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.06868v1-abstract-short" style="display: inline;"> User churn is an important issue in online services that threatens the health and profitability of services. Most of the previous works on churn prediction convert the problem into a binary classification task where the users are labeled as churned and non-churned. More recently, some works have tried to convert the user churn prediction problem into the prediction of user return time. 
In this app&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.06868v1-abstract-full').style.display = 'inline'; document.getElementById('1909.06868v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.06868v1-abstract-full" style="display: none;"> User churn is an important issue in online services that threatens the health and profitability of services. Most of the previous works on churn prediction convert the problem into a binary classification task where the users are labeled as churned and non-churned. More recently, some works have tried to convert the user churn prediction problem into the prediction of user return time. In this approach which is more realistic in real world online services, at each time-step the model predicts the user return time instead of predicting a churn label. However, the previous works in this category suffer from lack of generality and require high computational complexity. In this paper, we introduce \emph{ChOracle}, an oracle that predicts the user churn by modeling the user return times to service by utilizing a combination of Temporal Point Processes and Recurrent Neural Networks. Moreover, we incorporate latent variables into the proposed recurrent neural network to model the latent user loyalty to the system. We also develop an efficient approximate variational algorithm for learning parameters of the proposed RNN by using back propagation through time. Finally, we demonstrate the superior performance of ChOracle on a wide variety of real world datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.06868v1-abstract-full').style.display = 'none'; document.getElementById('1909.06868v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1906.03423">arXiv:1906.03423</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1906.03423">pdf</a>, <a href="https://arxiv.org/format/1906.03423">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3341161.3342957">10.1145/3341161.3342957 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> News Labeling as Early as Possible: Real or Fake? 
arXiv:1906.03423 [cs.SI, cs.CL, cs.LG]
DOI: 10.1145/3341161.3342957
News Labeling as Early as Possible: Real or Fake?
Authors: Maryam Ramezani, Mina Rafiei, Soroush Omranpour, Hamid R. Rabiee
Abstract: Distinguishing between real and fake news propagating through online social networks is an important issue in many applications. The time gap between a news item's release and the detection of its label is a significant factor in broadcasting real information and suppressing fake news. Therefore, one of the challenging tasks in this area is to identify fake and real news in the early stages of propagation. However, there is a trade-off between minimizing the time gap and maximizing accuracy. Despite recent efforts in fake news detection, no significant prior work explicitly incorporates early detection into its model. In this paper, we focus on accurate early labeling of news and propose a model that considers earliness both in modeling and in prediction. The proposed method uses recurrent neural networks with a novel loss function and a new stopping rule. Given the context of a news item, we first embed it with a class-specific text representation. Then we use the available public profiles of users and the speed of news diffusion for early labeling of the news. Experiments on real datasets demonstrate the effectiveness of our model in terms of both early labeling and accuracy, compared to state-of-the-art baselines.
Submitted 8 June, 2019; originally announced June 2019.
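A common way to trade accuracy against earliness is to score the propagation stream step by step and stop as soon as the classifier is confident enough. The snippet below is a generic sketch of such a read-then-stop loop, with a GRU over per-step diffusion features; the paper's loss function and stopping rule are more specific than this simple confidence threshold, so every name and dimension here is an assumption.

    import torch
    import torch.nn as nn

    class EarlyLabeler(nn.Module):
        """Consume diffusion features one step at a time; stop when confident."""
        def __init__(self, feat_dim=16, hidden=64):
            super().__init__()
            self.cell = nn.GRUCell(feat_dim, hidden)
            self.head = nn.Linear(hidden, 2)   # real vs. fake

        def label_early(self, steps, threshold=0.9):
            # steps: iterable of (feat_dim,) tensors observed as the news spreads
            h = torch.zeros(1, self.cell.hidden_size)
            prob, t = torch.full((2,), 0.5), 0
            for t, x in enumerate(steps, start=1):
                h = self.cell(x.unsqueeze(0), h)
                prob = torch.softmax(self.head(h), dim=-1).squeeze(0)
                if prob.max().item() >= threshold:
                    break                      # generic stopping rule (assumed)
            return prob, t                     # label distribution and stopping step

Lowering the threshold makes the labeler earlier but less accurate, which is exactly the trade-off the abstract describes.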
arXiv:1903.12371 [eess.SY, cs.SI]
DOI: 10.1109/JSYST.2019.2900027
Cyber-Social Systems: Modeling, Inference, and Optimal Design
Authors: Mohammadreza Doostmohammadian, Hamid R. Rabiee, Usman A. Khan
Abstract: This paper models the cyber-social system as a cyber-network of agents monitoring the states of individuals in a social network. The state of each individual is represented by a social node, and the interactions among individuals are represented by social links. In the cyber-network, each node represents an agent and the links represent information sharing among agents. Agents observe the social states and perform distributed inference. In this direction, the contribution of this work is threefold: (i) a novel distributed inference protocol is proposed that makes no assumption on the rank of the underlying social system; this is significant, as most protocols in the literature only work on full-rank systems. (ii) A novel agent classification is developed, and it is shown that the connectivity requirement on the cyber-network differs for each agent type; this is particularly important for finding the minimal number of observations and the minimal connectivity of the cyber-network, which is the next contribution. (iii) The cost-optimal design of the cyber-network under a distributed observability constraint is addressed. This problem is subdivided into sensing cost optimization and networking cost optimization, both of which are claimed to be NP-hard. We solve both problems for certain types of social networks and find polynomial-order solutions.
Submitted 29 March, 2019; originally announced March 2019.
Comments: 12 pages, 7 figures
Journal ref: IEEE Systems Journal, 2019
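Distributed inference in this setting is often illustrated with a consensus-plus-innovations observer: each agent fuses its neighbors' estimates over the cyber-network and then corrects with its own local measurement of the social state. The toy NumPy sketch below shows only that generic message-passing structure, not the protocol proposed in the paper; the dynamics, gains, and ring topology are all made up for illustration.

    import numpy as np

    rng = np.random.default_rng(0)

    # Toy setup (assumed): n social states with stable linear dynamics,
    # m agents each measuring one state, sharing estimates over a ring.
    n = m = 4
    A = 0.95 * np.eye(n)                 # social dynamics
    C = np.eye(m, n)                     # agent i observes state i
    W = np.zeros((m, m))                 # row-stochastic consensus weights
    for i in range(m):
        W[i, [i, (i - 1) % m, (i + 1) % m]] = 1 / 3

    x = rng.standard_normal(n)           # true social state
    xhat = np.zeros((m, n))              # per-agent estimates
    gain = 0.5                           # innovation gain (assumed)

    for _ in range(30):
        x = A @ x
        y = C @ x + 0.01 * rng.standard_normal(m)
        new = np.empty_like(xhat)
        for i in range(m):
            prior = A @ (W[i] @ xhat)                                  # consensus, then predict
            new[i] = prior + gain * C[i] * (y[i] - C[i] @ prior)       # local innovation
        xhat = new

    print("per-agent estimation error:", np.round(np.linalg.norm(xhat - x, axis=1), 4))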
arXiv:1902.00329 [cs.IT, cs.CR]
Privacy Against Brute-Force Inference Attacks
Authors: Seyed Ali Osia, Borzoo Rassouli, Hamed Haddadi, Hamid R. Rabiee, Deniz Gündüz
Abstract: Privacy-preserving data release is about disclosing information about useful data while retaining the privacy of sensitive data. Assuming that the sensitive data is threatened by a brute-force adversary, we define Guessing Leakage as a measure of privacy, based on the concept of guessing. After investigating the properties of this measure, we derive the optimal utility-privacy trade-off via a linear program with any $f$-information adopted as the utility measure, and show that the optimal utility is a concave and piecewise linear function of the privacy-leakage budget.
Submitted 1 February, 2019; originally announced February 2019.
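The concave, piecewise-linear shape of the optimal trade-off is a general property of linear programs whose budget enters as the right-hand side of a constraint. The toy sweep below illustrates that behavior with scipy's LP solver; the objective and "leakage" coefficients are made up, and this is not the paper's formulation of guessing leakage or $f$-information.

    import numpy as np
    from scipy.optimize import linprog

    # Toy LP: maximize a linear "utility" over a probability vector p subject to a
    # linear "leakage" budget; the optimal value traced over the budget is concave
    # and piecewise linear, matching the shape stated in the abstract.
    u = np.array([1.0, 0.4, 0.1])       # per-component utility (made up)
    leak = np.array([0.9, 0.3, 0.05])   # per-component leakage (made up)

    for budget in np.linspace(0.05, 0.9, 8):
        res = linprog(-u, A_ub=[leak], b_ub=[budget],
                      A_eq=np.ones((1, 3)), b_eq=[1.0], bounds=(0, 1))
        print(f"budget={budget:.2f}  optimal utility={-res.fun:.3f}")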
arXiv:1811.08812 [cs.LG, stat.ML]
Adversarial Classifier for Imbalanced Problems
Authors: Ehsan Montahaei, Mahsa Ghorbani, Mahdieh Soleymani Baghshah, Hamid R. Rabiee
Abstract: The adversarial approach has been widely used for data generation in the last few years, but it has not been extensively utilized for classifier training. In this paper, we propose an adversarial framework for classifier training that can also handle imbalanced data. Specifically, a network is trained via an adversarial approach to assign weights to samples of the majority class such that the resulting classification problem becomes more challenging for the discriminator, which in turn boosts its classification capability. In addition to general imbalanced classification problems, the proposed method can also be used for problems such as graph representation learning, in which it is desirable to discriminate similar nodes from dissimilar nodes. Experimental results on imbalanced data classification and on tasks such as graph link prediction show the superiority of the proposed method compared to state-of-the-art methods.
Submitted 21 November, 2018; originally announced November 2018.
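The min-max interplay described above can be prototyped with two small networks and two optimizers: a weighting network tries to make the weighted loss on majority-class samples as hard as possible, while the classifier minimizes it. The PyTorch sketch below shows one such alternating update; the architectures, the sigmoid weighting, and the hyper-parameters are assumptions, and this is not the paper's exact objective.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    clf = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 2))
    weighter = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 1))
    opt_c = torch.optim.Adam(clf.parameters(), lr=1e-3)
    opt_w = torch.optim.Adam(weighter.parameters(), lr=1e-3)

    def adversarial_step(x, y, majority_mask):
        # majority_mask: boolean tensor marking majority-class samples.
        # Weighting network: ascend the classifier's weighted loss on the majority class.
        w = torch.sigmoid(weighter(x)).squeeze(-1)
        w = torch.where(majority_mask, w, torch.ones_like(w))   # minority keeps weight 1
        ce = F.cross_entropy(clf(x), y, reduction="none").detach()
        opt_w.zero_grad(); (-(w * ce).mean()).backward(); opt_w.step()

        # Classifier: descend the same loss under the updated (frozen) weights.
        with torch.no_grad():
            w = torch.sigmoid(weighter(x)).squeeze(-1)
            w = torch.where(majority_mask, w, torch.ones_like(w))
        loss = (w * F.cross_entropy(clf(x), y, reduction="none")).mean()
        opt_c.zero_grad(); loss.backward(); opt_c.step()
        return loss.item()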
arXiv:1811.08800 [cs.LG, stat.ML]
DOI: 10.1145/3341161.3342942
MGCN: Semi-supervised Classification in Multi-layer Graphs with Graph Convolutional Networks
Authors: Mahsa Ghorbani, Mahdieh Soleymani Baghshah, Hamid R. Rabiee
Abstract: Graph embedding is an important approach for graph analysis tasks such as node classification and link prediction. The goal of graph embedding is to find a low-dimensional representation of graph nodes that preserves the graph information. Recent methods like the Graph Convolutional Network (GCN) consider node attributes (if available) in addition to node relations and learn node embeddings for unsupervised and semi-supervised tasks on graphs. On the other hand, multi-layer graph analysis has received attention recently. However, existing methods for multi-layer graph embedding cannot incorporate all available information (such as node attributes). Moreover, most of them consider only one type of node or edge and do not treat within-layer and between-layer edges differently. In this paper, we propose a method called MGCN that applies the GCN to multi-layer graphs. MGCN embeds the nodes of multi-layer graphs using both within- and between-layer relations as well as node attributes. We evaluate our method on the semi-supervised node classification task. Experimental results demonstrate the superiority of the proposed method over other multi-layer and single-layer competitors and also show the positive effect of using cross-layer edges.
Submitted 24 August, 2019; v1 submitted 21 November, 2018; originally announced November 2018.
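The building block MGCN applies per graph layer is the standard GCN propagation H' = ReLU(A_hat H W), with A_hat the symmetrically normalized adjacency. The NumPy snippet below shows that propagation for one layer and adds an extra cross-layer term to hint at how between-layer edges could enter; the way the cross-layer adjacency is normalized and combined here is an assumption, not MGCN's actual architecture.

    import numpy as np

    def normalize(adj):
        # A_hat = D^{-1/2} (A + I) D^{-1/2}, the usual GCN propagation matrix.
        a = adj + np.eye(adj.shape[0])
        d = 1.0 / np.sqrt(a.sum(axis=1))
        return a * d[:, None] * d[None, :]

    def multilayer_gcn_layer(adj_within, adj_cross, H_self, H_other, W_self, W_cross):
        # Within-layer neighbors plus (assumed) cross-layer neighbors, then ReLU.
        msg = normalize(adj_within) @ H_self @ W_self
        msg = msg + normalize(adj_cross) @ H_other @ W_cross
        return np.maximum(msg, 0.0)

    # Toy usage: 5 nodes per graph layer, 8-dim attributes, 4-dim embeddings.
    rng = np.random.default_rng(1)
    A1 = np.triu((rng.random((5, 5)) < 0.4).astype(float), 1); A1 = A1 + A1.T
    Ax = (rng.random((5, 5)) < 0.3).astype(float)
    H1, H2 = rng.standard_normal((5, 8)), rng.standard_normal((5, 8))
    W1, Wx = rng.standard_normal((8, 4)), rng.standard_normal((8, 4))
    print(multilayer_gcn_layer(A1, Ax, H1, H2, W1, Wx).shape)   # (5, 4)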
arXiv:1810.07845 [cs.LG, stat.ML]
On Statistical Learning of Simplices: Unmixing Problem Revisited
Authors: Amir Najafi, Saeed Ilchi, Amir H. Saberi, Seyed Abolfazl Motahari, Babak H. Khalaj, Hamid R. Rabiee
Abstract: We study the sample complexity of learning a high-dimensional simplex from a set of points uniformly sampled from its interior. Learning of simplices is a long-studied problem in computer science and has applications in computational biology and remote sensing, mostly under the name of 'spectral unmixing'. We theoretically show that a sufficient sample complexity for reliable learning of a $K$-dimensional simplex up to a total-variation error of $\epsilon$ is $O\left(\frac{K^2}{\epsilon}\log\frac{K}{\epsilon}\right)$, which yields a substantial improvement over existing bounds. Based on our new theoretical framework, we also propose a heuristic approach for the inference of simplices. Experimental results on synthetic and real-world datasets demonstrate a comparable performance for our method on noiseless samples, while we outperform the state of the art in noisy cases.
Submitted 12 August, 2020; v1 submitted 17 October, 2018; originally announced October 2018.
Comments: 32 pages
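The data model behind this problem is easy to reproduce: a point drawn uniformly from the interior of a $K$-dimensional simplex is a Dirichlet(1, ..., 1)-weighted combination of its $K+1$ vertices. The snippet below generates such a dataset and runs a trivial sanity check on it; it only illustrates the sampling setup, not the estimator proposed in the paper, and all sizes are arbitrary.

    import numpy as np

    rng = np.random.default_rng(0)

    # Uniform samples from a K-simplex in R^d: x = w @ V with w ~ Dirichlet(1, ..., 1).
    K, d, n = 3, 5, 2000
    V = rng.standard_normal((K + 1, d))            # true (unknown) vertices
    W = rng.dirichlet(np.ones(K + 1), size=n)      # uniform barycentric coordinates
    X = W @ V                                      # n points inside the simplex

    # Sanity check (not the paper's estimator): the sample mean should approach the
    # simplex centroid, i.e. the average of the vertices.
    print(np.linalg.norm(X.mean(axis=0) - V.mean(axis=0)))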
arXiv:1804.01799 [eess.SY, cs.MA]
DOI: 10.1109/LSP.2018.2824761
Structural cost-optimal design of sensor networks for distributed estimation
Authors: Mohammadreza Doostmohammadian, Hamid R. Rabiee, Usman A. Khan
Abstract: In this letter we discuss cost optimization of sensor networks monitoring structurally full-rank systems under a distributed observability constraint. Using structured systems theory, the problem is relaxed into two subproblems: (i) sensing cost optimization and (ii) networking cost optimization. Both problems are reformulated as combinatorial optimization problems. The sensing cost optimization is shown to have a polynomial-order solution. The networking cost optimization is shown to be NP-hard in general, but it has a polynomial-order solution under specific conditions. A 2-approximation polynomial-order relaxation is provided for general networking cost optimization, which is applicable in large-scale system monitoring.
Submitted 5 April, 2018; originally announced April 2018.
Journal ref: IEEE Signal Processing Letters, 2018
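Sensing-cost problems become polynomial-time solvable when the structural requirements can be written as "each required measurement slot must be covered by exactly one sensor", which is a minimum-cost assignment. The snippet below only illustrates that generic reduction with scipy's Hungarian-algorithm solver; the actual structural conditions and cost model in the letter are more involved and are not reproduced here.

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    # Toy covering model (illustrative only): cost[i, j] = cost of letting sensor i
    # cover required measurement slot j; a min-cost assignment is a cheapest design.
    rng = np.random.default_rng(2)
    cost = rng.integers(1, 10, size=(4, 4)).astype(float)
    rows, cols = linear_sum_assignment(cost)
    print("assignment:", list(zip(rows.tolist(), cols.tolist())))
    print("total sensing cost:", cost[rows, cols].sum())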
arXiv:1802.07244 [cs.SI, cs.LG, stat.ML]
Steering Social Activity: A Stochastic Optimal Control Point Of View
Authors: Ali Zarezade, Abir De, Utkarsh Upadhyay, Hamid R. Rabiee, Manuel Gomez-Rodriguez
Abstract: User engagement in online social networking depends critically on the level of social activity in the corresponding platform, that is, the number of online actions, such as posts, shares, or replies, taken by its users. Can we design data-driven algorithms to increase social activity? At a user level, such algorithms may increase activity by helping users decide when to take an action so as to be more likely to be noticed by their peers. At a network level, they may increase activity by incentivizing a few influential users to take more actions, which in turn will trigger additional actions by other users. In this paper, we model social activity using the framework of marked temporal point processes, derive an alternate representation of these processes using stochastic differential equations (SDEs) with jumps and, exploiting this alternate representation, develop two efficient online algorithms with provable guarantees to steer social activity both at a user and at a network level. In doing so, we establish a previously unexplored connection between optimal control of jump SDEs and doubly stochastic marked temporal point processes, which is of independent interest. Finally, we experiment with both synthetic and real data gathered from Twitter and show that our algorithms consistently steer social activity more effectively than the state of the art.
Submitted 19 February, 2018; originally announced February 2018.
Comments: To appear in JMLR 2018. arXiv admin note: substantial text overlap with arXiv:1610.05773, arXiv:1703.02059
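The self-exciting activity these algorithms steer is typically represented as a Hawkes-type temporal point process, which can be simulated with Ogata's thinning algorithm. The sketch below simulates such a process with an exponential kernel; it illustrates only the underlying activity model and contains none of the paper's control machinery, and mu, alpha, and beta are made-up parameters.

    import numpy as np

    def simulate_hawkes(mu=0.5, alpha=0.8, beta=1.2, horizon=50.0, seed=0):
        """Ogata thinning for intensity lambda(t) = mu + alpha * sum_i exp(-beta (t - t_i))."""
        rng = np.random.default_rng(seed)
        events, t = [], 0.0
        while t < horizon:
            hist = np.array(events)
            lam_bar = mu + alpha * np.sum(np.exp(-beta * (t - hist)))   # valid upper bound
            t += rng.exponential(1.0 / lam_bar)                         # candidate event time
            if t >= horizon:
                break
            lam_t = mu + alpha * np.sum(np.exp(-beta * (t - hist)))
            if rng.random() <= lam_t / lam_bar:                         # accept with prob lam_t/lam_bar
                events.append(t)
        return np.array(events)

    print("simulated", len(simulate_hawkes()), "actions on [0, 50]")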
arXiv:1802.03151 [stat.ML, cs.CR, cs.CV, cs.IT, cs.LG]
Deep Private-Feature Extraction
Authors: Seyed Ali Osia, Ali Taheri, Ali Shahin Shamsabadi, Kleomenis Katevas, Hamed Haddadi, Hamid R. Rabiee
Abstract: We present and evaluate the Deep Private-Feature Extractor (DPFE), a deep model which is trained and evaluated based on information-theoretic constraints. Using the selective exchange of information between a user's device and a service provider, DPFE enables the user to prevent certain sensitive information from being shared with a service provider, while allowing them to extract approved information using their model. We introduce and utilize log-rank privacy, a novel measure to assess the effectiveness of DPFE in removing sensitive information, and compare different models based on their accuracy-privacy trade-off. We then implement and evaluate the performance of DPFE on smartphones to understand its complexity, resource demands, and efficiency trade-offs. Our results on benchmark image datasets demonstrate that under moderate resource utilization, DPFE can achieve high accuracy for primary tasks while preserving the privacy of sensitive features.
Submitted 28 February, 2018; v1 submitted 9 February, 2018; originally announced February 2018.
arXiv:1710.02101 [cs.LG, cs.IT, stat.ML]
Reliable Clustering of Bernoulli Mixture Models
Authors: Amir Najafi, Abolfazl Motahari, Hamid R. Rabiee
Abstract: A Bernoulli Mixture Model (BMM) is a finite mixture of random binary vectors with independent dimensions. The problem of clustering BMM data arises in a variety of real-world applications, ranging from population genetics to activity analysis in social networks. In this paper, we analyze the clusterability of BMMs from a theoretical perspective, when the number of clusters is unknown. In particular, we stipulate a set of conditions on the sample complexity and dimension of the model in order to guarantee the Probably Approximately Correct (PAC)-clusterability of a dataset. To the best of our knowledge, these findings are the first non-asymptotic bounds on the sample complexity of learning or clustering BMMs.
Submitted 16 June, 2019; v1 submitted 5 October, 2017; originally announced October 2017.
Comments: 22 pages
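As a concrete reference point for what clustering BMM data looks like in practice, the snippet below runs a small EM loop for a two-component Bernoulli mixture on synthetic binary vectors. EM is the standard baseline for this model family rather than the analysis tool of the paper, and the dimensions, component count, and parameters here are arbitrary.

    import numpy as np

    rng = np.random.default_rng(0)

    # Synthetic BMM data: two components, four independent Bernoulli dimensions each.
    true_p = np.array([[0.9, 0.8, 0.1, 0.2], [0.1, 0.2, 0.9, 0.8]])
    z = rng.integers(0, 2, size=500)
    X = (rng.random((500, 4)) < true_p[z]).astype(float)

    # EM for a K-component Bernoulli mixture.
    K, D = 2, X.shape[1]
    pi = np.full(K, 1.0 / K)
    p = rng.uniform(0.3, 0.7, size=(K, D))
    for _ in range(100):
        # E-step: responsibilities from per-component log-likelihoods.
        logp = X @ np.log(p).T + (1 - X) @ np.log(1 - p).T + np.log(pi)
        logp -= logp.max(axis=1, keepdims=True)
        r = np.exp(logp); r /= r.sum(axis=1, keepdims=True)
        # M-step: update mixing weights and Bernoulli parameters.
        nk = r.sum(axis=0)
        pi = nk / len(X)
        p = np.clip((r.T @ X) / nk[:, None], 1e-6, 1 - 1e-6)

    print("estimated Bernoulli parameters:\n", np.round(p, 2))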
arXiv:1710.01727 [cs.CV, cs.AI, cs.CR]
Privacy-Preserving Deep Inference for Rich User Data on The Cloud
Authors: Seyed Ali Osia, Ali Shahin Shamsabadi, Ali Taheri, Kleomenis Katevas, Hamid R. Rabiee, Nicholas D. Lane, Hamed Haddadi
Abstract: Deep neural networks are increasingly being used in a variety of machine learning applications applied to rich user data on the cloud. However, this approach introduces a number of privacy and efficiency challenges, as the cloud operator can perform secondary inferences on the available data. Recently, advances in edge processing have paved the way for more efficient, and private, data processing at the source for simple tasks and lighter models, though larger and more complicated models remain a challenge. In this paper, we present a hybrid approach for breaking down large, complex deep models to enable cooperative, privacy-preserving analytics. We do this by splitting popular deep architectures and fine-tuning them in a particular way. We then evaluate the privacy benefits of this approach based on the information exposed to the cloud service. We also assess the local inference cost of different layers on a modern handset for mobile applications. Our evaluations show that, with certain kinds of fine-tuning and embedding techniques and at a small processing cost, we can greatly reduce the level of information available to unintended tasks applied to the data features on the cloud, and hence achieve the desired trade-off between privacy and performance.
Submitted 11 October, 2017; v1 submitted 4 October, 2017; originally announced October 2017.
Comments: arXiv admin note: substantial text overlap with arXiv:1703.02952
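The hybrid layout described in this abstract amounts to cutting a trained network at some layer: the handset runs the early layers and uploads only the intermediate features, and the cloud runs the rest. The PyTorch sketch below shows such a split on a torchvision ResNet-18; the cut point and model choice are assumptions for illustration, and none of the paper's fine-tuning or embedding steps are included.

    import torch
    import torch.nn as nn
    from torchvision.models import resnet18

    model = resnet18().eval()                  # untrained here; a deployment would load weights
    layers = list(model.children())

    device_part = nn.Sequential(*layers[:6])                              # runs on the handset
    cloud_part = nn.Sequential(*layers[6:-1], nn.Flatten(), layers[-1])   # runs on the cloud

    x = torch.randn(1, 3, 224, 224)            # the raw user image never leaves the device
    with torch.no_grad():
        features = device_part(x)              # only this intermediate tensor is uploaded
        logits = cloud_part(features)
    print(features.shape, logits.shape)

Choosing a deeper cut point generally increases on-device cost but exposes a more abstract, and typically less invertible, feature tensor to the cloud.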
