<!-- crawl snapshot header (CINXE.COM): Search | arXiv e-print repository -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src="https://static.arxiv.org/MathJax-2.7.3/MathJax.js"></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> input#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 54 results for author: <span class="mathjax">Suzumura, T</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Suzumura%2C+T">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Suzumura, T"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Suzumura%2C+T&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Suzumura, T"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Suzumura%2C+T&amp;start=50" class="pagination-next">Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Suzumura%2C+T&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Suzumura%2C+T&amp;start=50" class="pagination-link" aria-label="Page 2">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.16155">arXiv:2411.16155</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.16155">pdf</a>, <a href="https://arxiv.org/format/2411.16155">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Graph Adapter of 
EEG Foundation Models for Parameter Efficient Fine Tuning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Akahori%2C+S">Shotaro Akahori</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.16155v1-abstract-short" style="display: inline;"> In diagnosing mental diseases from electroencephalography (EEG) data, neural network models such as Transformers have been employed to capture temporal dynamics. Additionally, it is crucial to learn the spatial relationships between EEG sensors, for which Graph Neural Networks (GNNs) are commonly used. However, fine-tuning large-scale complex neural network models simultaneously to capture both te&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16155v1-abstract-full').style.display = 'inline'; document.getElementById('2411.16155v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.16155v1-abstract-full" style="display: none;"> In diagnosing mental diseases from electroencephalography (EEG) data, neural network models such as Transformers have been employed to capture temporal dynamics. Additionally, it is crucial to learn the spatial relationships between EEG sensors, for which Graph Neural Networks (GNNs) are commonly used. However, fine-tuning large-scale complex neural network models simultaneously to capture both temporal and spatial features increases computational costs due to the more significant number of trainable parameters. 
It causes the limited availability of EEG datasets for downstream tasks, making it challenging to fine-tune large models effectively. We propose EEG-GraphAdapter (EGA), a parameter-efficient fine-tuning (PEFT) approach to address these challenges. EGA is integrated into pre-trained temporal backbone models as a GNN-based module and fine-tuned itself alone while keeping the backbone model parameters frozen. This enables the acquisition of spatial representations of EEG signals for downstream tasks, significantly reducing computational overhead and data requirements. Experimental evaluations on healthcare-related downstream tasks of Major Depressive Disorder and Abnormality Detection demonstrate that our EGA improves performance by up to 16.1% in the F1-score compared with the backbone BENDR model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.16155v1-abstract-full').style.display = 'none'; document.getElementById('2411.16155v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.03265">arXiv:2410.03265</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.03265">pdf</a>, <a href="https://arxiv.org/format/2410.03265">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Multimodal Point-of-Interest Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kanzawa%2C+Y">Yuta Kanzawa</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Yong%2C+J">Jiawei Yong</a>, <a href="/search/cs?searchtype=author&amp;query=Fukushima%2C+S">Shintaro Fukushima</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.03265v2-abstract-short" style="display: inline;"> Large Language Models are applied to recommendation tasks such as items to buy and news articles to read. Point of Interest is quite a new area to sequential recommendation based on language representations of multimodal datasets. As a first step to prove our concepts, we focused on restaurant recommendation based on each user&#39;s past visit history. 
When choosing a next restaurant to visit, a user&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03265v2-abstract-full').style.display = 'inline'; document.getElementById('2410.03265v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.03265v2-abstract-full" style="display: none;"> Large Language Models are applied to recommendation tasks such as items to buy and news articles to read. Point of Interest is quite a new area to sequential recommendation based on language representations of multimodal datasets. As a first step to prove our concepts, we focused on restaurant recommendation based on each user&#39;s past visit history. When choosing a next restaurant to visit, a user would consider genre and location of the venue and, if available, pictures of dishes served there. We created a pseudo restaurant check-in history dataset from the Foursquare dataset and the FoodX-251 dataset by converting pictures into text descriptions with a multimodal model called LLaVA, and used a language-based sequential recommendation framework named Recformer proposed in 2023. A model trained on this semi-multimodal dataset has outperformed another model trained on the same dataset without picture descriptions. This suggests that this semi-multimodal model reflects actual human behaviours and that our path to a multimodal recommendation model is in the right direction. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.03265v2-abstract-full').style.display = 'none'; document.getElementById('2410.03265v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.16674">arXiv:2409.16674</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.16674">pdf</a>, <a href="https://arxiv.org/format/2409.16674">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> A Prompting-Based Representation Learning Method for Recommendation with Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Junyi Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.16674v3-abstract-short" style="display: inline;"> In recent years, Recommender Systems (RS) have witnessed a transformative shift with the advent of Large Language Models (LLMs) in the field of Natural Language Processing (NLP). Models such as GPT-3.5/4, Llama, have demonstrated unprecedented capabilities in understanding and generating human-like text. 
The extensive information pre-trained by these LLMs allows for the potential to capture a more&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16674v3-abstract-full').style.display = 'inline'; document.getElementById('2409.16674v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.16674v3-abstract-full" style="display: none;"> In recent years, Recommender Systems (RS) have witnessed a transformative shift with the advent of Large Language Models (LLMs) in the field of Natural Language Processing (NLP). Models such as GPT-3.5/4, Llama, have demonstrated unprecedented capabilities in understanding and generating human-like text. The extensive information pre-trained by these LLMs allows for the potential to capture a more profound semantic representation from different contextual information of users and items. While the great potential lies behind the thriving of LLMs, the challenge of leveraging user-item preferences from contextual information and its alignment with the improvement of Recommender Systems needs to be addressed. Believing that a better understanding of the user or item itself can be the key factor in improving recommendation performance, we conduct research on generating informative profiles using state-of-the-art LLMs. To boost the linguistic abilities of LLMs in Recommender Systems, we introduce the Prompting-Based Representation Learning Method for Recommendation (P4R). In our P4R framework, we utilize the LLM prompting strategy to create personalized item profiles. These profiles are then transformed into semantic representation spaces using a pre-trained BERT model for text embedding. Furthermore, we incorporate a Graph Convolution Network (GCN) for collaborative filtering representation. The P4R framework aligns these two embedding spaces in order to address the general recommendation tasks. 
In our evaluation, we compare P4R with state-of-the-art Recommender models and assess the quality of prompt-based profile generation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.16674v3-abstract-full').style.display = 'none'; document.getElementById('2409.16674v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Risks: The 1st International Workshop on Risks, Opportunities, and Evaluation of Generative Models in Recommendation</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.06734">arXiv:2409.06734</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.06734">pdf</a>, <a href="https://arxiv.org/format/2409.06734">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> ARIM-mdx Data System: Towards a Nationwide Data Platform for Materials Science </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Ishikawa%2C+R">Ryo Ishikawa</a>, <a href="/search/cs?searchtype=author&amp;query=Kawamura%2C+M">Mitsuaki Kawamura</a>, <a href="/search/cs?searchtype=author&amp;query=Ohnishi%2C+M">Masato Ohnishi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Takenaka%2C+N">Norio Takenaka</a>, <a href="/search/cs?searchtype=author&amp;query=Nakamura%2C+K">Kou Nakamura</a>, <a href="/search/cs?searchtype=author&amp;query=Matsumura%2C+D">Daiju Matsumura</a>, <a href="/search/cs?searchtype=author&amp;query=Fujikawa%2C+S">Seiji Fujikawa</a>, <a href="/search/cs?searchtype=author&amp;query=Sakamoto%2C+H">Hiroki Sakamoto</a>, <a href="/search/cs?searchtype=author&amp;query=Ochiai%2C+Y">Yukinori Ochiai</a>, <a href="/search/cs?searchtype=author&amp;query=Okane%2C+T">Tetsuo Okane</a>, <a href="/search/cs?searchtype=author&amp;query=Kuroki%2C+S">Shin-Ichiro Kuroki</a>, <a href="/search/cs?searchtype=author&amp;query=Yamada%2C+A">Atsuo Yamada</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Shiomi%2C+J">Junichiro Shiomi</a>, <a href="/search/cs?searchtype=author&amp;query=Taura%2C+K">Kenjiro Taura</a>, <a href="/search/cs?searchtype=author&amp;query=Mita%2C+Y">Yoshio Mita</a>, <a href="/search/cs?searchtype=author&amp;query=Shibata%2C+N">Naoya Shibata</a>, <a href="/search/cs?searchtype=author&amp;query=Ikuhara%2C+Y">Yuichi Ikuhara</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.06734v3-abstract-short" style="display: inline;"> In modern materials science, effective and high-volume data management across leading-edge experimental facilities and world-class supercomputers is indispensable for cutting-edge research. However, existing integrated systems that handle data from these resources have primarily focused just on smaller-scale cross-institutional or single-domain operations. 
As a result, they often lack the scalabil&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06734v3-abstract-full').style.display = 'inline'; document.getElementById('2409.06734v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.06734v3-abstract-full" style="display: none;"> In modern materials science, effective and high-volume data management across leading-edge experimental facilities and world-class supercomputers is indispensable for cutting-edge research. However, existing integrated systems that handle data from these resources have primarily focused just on smaller-scale cross-institutional or single-domain operations. As a result, they often lack the scalability, efficiency, agility, and interdisciplinarity, needed for handling substantial volumes of data from various researchers. In this paper, we introduce ARIM-mdx data system, aiming at a nationwide data platform for materials science in Japan. Currently in its trial phase, the platform has been involving 11 universities and institutes all over Japan, and it is utilized by over 800 researchers from around 140 organizations in academia and industry, being intended to gradually expand its reach. The ARIM-mdx data system, as a pioneering nationwide data platform, has the potential to contribute to the creation of new research communities and accelerate innovations. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06734v3-abstract-full').style.display = 'none'; document.getElementById('2409.06734v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE BigData 2024, to appear. Project Page https://arim.mdx.jp/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.09939">arXiv:2407.09939</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.09939">pdf</a>, <a href="https://arxiv.org/format/2407.09939">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Popular News Always Compete for the User&#39;s Attention! POPK: Mitigating Popularity Bias via a Temporal-Counterfactual </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Azevedo%2C+I+L+R">Igor L. R. 
Azevedo</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Yasui%2C+Y">Yuichiro Yasui</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.09939v1-abstract-short" style="display: inline;"> In news recommendation systems, reducing popularity bias is essential for delivering accurate and diverse recommendations. This paper presents POPK, a new method that uses temporal-counterfactual analysis to mitigate the influence of popular news articles. By asking, &#34;What if, at a given time $t$, a set of popular news articles were competing for the user&#39;s attention to be clicked?&#34;, POPK aims to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.09939v1-abstract-full').style.display = 'inline'; document.getElementById('2407.09939v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.09939v1-abstract-full" style="display: none;"> In news recommendation systems, reducing popularity bias is essential for delivering accurate and diverse recommendations. This paper presents POPK, a new method that uses temporal-counterfactual analysis to mitigate the influence of popular news articles. By asking, &#34;What if, at a given time $t$, a set of popular news articles were competing for the user&#39;s attention to be clicked?&#34;, POPK aims to improve recommendation accuracy and diversity. We tested POPK on three different language datasets (Japanese, English, and Norwegian) and found that it successfully enhances traditional methods. POPK offers flexibility for customization to enhance either accuracy or diversity, alongside providing distinct ways of measuring popularity. 
We argue that popular news articles always compete for attention, even if they are not explicitly present in the user&#39;s impression list. POPK systematically eliminates the implicit influence of popular news articles during each training step. We combine counterfactual reasoning with a temporal approach to adjust the negative sample space, refining understanding of user interests. Our findings underscore how POPK effectively enhances the accuracy and diversity of recommended articles while also tailoring the approach to specific needs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.09939v1-abstract-full').style.display = 'none'; document.getElementById('2407.09939v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.09137">arXiv:2407.09137</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.09137">pdf</a>, <a href="https://arxiv.org/format/2407.09137">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> A Look Into News Avoidance Through AWRS: An Avoidance-Aware Recommender System </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Azevedo%2C+I+L+R">Igor L. R. 
Azevedo</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Yasui%2C+Y">Yuichiro Yasui</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.09137v1-abstract-short" style="display: inline;"> In recent years, journalists have expressed concerns about the increasing trend of news article avoidance, especially within specific domains. This issue has been exacerbated by the rise of recommender systems. Our research indicates that recommender systems should consider avoidance as a fundamental factor. We argue that news articles can be characterized by three principal elements: exposure, re&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.09137v1-abstract-full').style.display = 'inline'; document.getElementById('2407.09137v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.09137v1-abstract-full" style="display: none;"> In recent years, journalists have expressed concerns about the increasing trend of news article avoidance, especially within specific domains. This issue has been exacerbated by the rise of recommender systems. Our research indicates that recommender systems should consider avoidance as a fundamental factor. We argue that news articles can be characterized by three principal elements: exposure, relevance, and avoidance, all of which are closely interconnected. To address these challenges, we introduce AWRS, an Avoidance-Aware Recommender System. This framework incorporates avoidance awareness when recommending news, based on the premise that news article avoidance conveys significant information about user preferences. 
Evaluation results on three news datasets in different languages (English, Norwegian, and Japanese) demonstrate that our method outperforms existing approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.09137v1-abstract-full').style.display = 'none'; document.getElementById('2407.09137v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.03963">arXiv:2407.03963</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.03963">pdf</a>, <a href="https://arxiv.org/format/2407.03963">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> LLM-jp: A Cross-organizational Project for the Research and Development of Fully Open Japanese LLMs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=LLM-jp"> LLM-jp</a>, <a href="/search/cs?searchtype=author&amp;query=%3A"> :</a>, <a href="/search/cs?searchtype=author&amp;query=Aizawa%2C+A">Akiko Aizawa</a>, <a href="/search/cs?searchtype=author&amp;query=Aramaki%2C+E">Eiji Aramaki</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+B">Bowen Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+F">Fei Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Deguchi%2C+H">Hiroyuki Deguchi</a>, <a href="/search/cs?searchtype=author&amp;query=Enomoto%2C+R">Rintaro Enomoto</a>, 
<a href="/search/cs?searchtype=author&amp;query=Fujii%2C+K">Kazuki Fujii</a>, <a href="/search/cs?searchtype=author&amp;query=Fukumoto%2C+K">Kensuke Fukumoto</a>, <a href="/search/cs?searchtype=author&amp;query=Fukushima%2C+T">Takuya Fukushima</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+N">Namgi Han</a>, <a href="/search/cs?searchtype=author&amp;query=Harada%2C+Y">Yuto Harada</a>, <a href="/search/cs?searchtype=author&amp;query=Hashimoto%2C+C">Chikara Hashimoto</a>, <a href="/search/cs?searchtype=author&amp;query=Hiraoka%2C+T">Tatsuya Hiraoka</a>, <a href="/search/cs?searchtype=author&amp;query=Hisada%2C+S">Shohei Hisada</a>, <a href="/search/cs?searchtype=author&amp;query=Hosokawa%2C+S">Sosuke Hosokawa</a>, <a href="/search/cs?searchtype=author&amp;query=Jie%2C+L">Lu Jie</a>, <a href="/search/cs?searchtype=author&amp;query=Kamata%2C+K">Keisuke Kamata</a>, <a href="/search/cs?searchtype=author&amp;query=Kanazawa%2C+T">Teruhito Kanazawa</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Kataoka%2C+H">Hiroshi Kataoka</a>, <a href="/search/cs?searchtype=author&amp;query=Katsumata%2C+S">Satoru Katsumata</a>, <a href="/search/cs?searchtype=author&amp;query=Kawahara%2C+D">Daisuke Kawahara</a>, <a href="/search/cs?searchtype=author&amp;query=Kawano%2C+S">Seiya Kawano</a> , et al. (57 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.03963v1-abstract-short" style="display: inline;"> This paper introduces LLM-jp, a cross-organizational project for the research and development of Japanese large language models (LLMs). LLM-jp aims to develop open-source and strong Japanese LLMs, and as of this writing, more than 1,500 participants from academia and industry are working together for this purpose. 
This paper presents the background of the establishment of LLM-jp, summaries of its&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03963v1-abstract-full').style.display = 'inline'; document.getElementById('2407.03963v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.03963v1-abstract-full" style="display: none;"> This paper introduces LLM-jp, a cross-organizational project for the research and development of Japanese large language models (LLMs). LLM-jp aims to develop open-source and strong Japanese LLMs, and as of this writing, more than 1,500 participants from academia and industry are working together for this purpose. This paper presents the background of the establishment of LLM-jp, summaries of its activities, and technical reports on the LLMs developed by LLM-jp. For the latest activities, visit https://llm-jp.nii.ac.jp/en/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.03963v1-abstract-full').style.display = 'none'; document.getElementById('2407.03963v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.08413">arXiv:2406.08413</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.08413">pdf</a>, <a href="https://arxiv.org/format/2406.08413">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Memory Is All You Need: An Overview of Compute-in-Memory Architectures for Accelerating Large Language Model Inference </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wolters%2C+C">Christopher Wolters</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+X">Xiaoxuan Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Schlichtmann%2C+U">Ulf Schlichtmann</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.08413v1-abstract-short" style="display: inline;"> Large language models (LLMs) have recently transformed natural language processing, enabling machines to generate human-like text and engage in meaningful conversations. This development necessitates speed, efficiency, and accessibility in LLM inference as the computational and memory requirements of these systems grow exponentially. 
Meanwhile, advancements in computing and memory capabilities are&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08413v1-abstract-full').style.display = 'inline'; document.getElementById('2406.08413v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.08413v1-abstract-full" style="display: none;"> Large language models (LLMs) have recently transformed natural language processing, enabling machines to generate human-like text and engage in meaningful conversations. This development necessitates speed, efficiency, and accessibility in LLM inference as the computational and memory requirements of these systems grow exponentially. Meanwhile, advancements in computing and memory capabilities are lagging behind, exacerbated by the discontinuation of Moore&#39;s law. With LLMs exceeding the capacity of single GPUs, they require complex, expert-level configurations for parallel processing. Memory accesses become significantly more expensive than computation, posing a challenge for efficient scaling, known as the memory wall. Here, compute-in-memory (CIM) technologies offer a promising solution for accelerating AI inference by directly performing analog computations in memory, potentially reducing latency and power consumption. By closely integrating memory and compute elements, CIM eliminates the von Neumann bottleneck, reducing data movement and improving energy efficiency. This survey paper provides an overview and analysis of transformer-based models, reviewing various CIM architectures and exploring how they can address the imminent challenges of modern AI computing systems. We discuss transformer-related operators and their hardware acceleration schemes and highlight challenges, trends, and insights in corresponding CIM designs. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08413v1-abstract-full').style.display = 'none'; document.getElementById('2406.08413v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.16078">arXiv:2402.16078</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.16078">pdf</a>, <a href="https://arxiv.org/format/2402.16078">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Beyond Spatio-Temporal Representations: Evolving Fourier Transform for Temporal Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bastos%2C+A">Anson Bastos</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+K">Kuldeep Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Nadgeri%2C+A">Abhishek Nadgeri</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+M">Manish Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.16078v2-abstract-short" style="display: inline;"> We present the Evolving Graph Fourier Transform (EFT), the first invertible spectral transform that captures evolving representations on temporal graphs. 
We motivate our work by the inadequacy of existing methods for capturing the evolving graph spectra, which are also computationally expensive due to the temporal aspect along with the graph vertex domain. We view the problem as an optimization ov&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16078v2-abstract-full').style.display = 'inline'; document.getElementById('2402.16078v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.16078v2-abstract-full" style="display: none;"> We present the Evolving Graph Fourier Transform (EFT), the first invertible spectral transform that captures evolving representations on temporal graphs. We motivate our work by the inadequacy of existing methods for capturing the evolving graph spectra, which are also computationally expensive due to the temporal aspect along with the graph vertex domain. We view the problem as an optimization over the Laplacian of the continuous time dynamic graph. Additionally, we propose pseudo-spectrum relaxations that decompose the transformation process, making it highly computationally efficient. The EFT method adeptly captures the evolving graph&#39;s structural and positional properties, making it effective for downstream tasks on evolving graphs. Hence, as a reference implementation, we develop a simple neural model induced with EFT for capturing evolving graph spectra. We empirically validate our theoretical findings on a number of large-scale and standard temporal graph benchmarks and demonstrate that our model achieves state-of-the-art performance. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16078v2-abstract-full').style.display = 'none'; document.getElementById('2402.16078v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted as a full conference paper in the International Conference on Learning Representations 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.06489">arXiv:2310.06489</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.06489">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning for Automatic Detection and Facial Recognition in Japanese Macaques: Illuminating Social Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Paulet%2C+J">Julien Paulet</a>, <a href="/search/cs?searchtype=author&amp;query=Molina%2C+A">Axel Molina</a>, <a href="/search/cs?searchtype=author&amp;query=Beltzung%2C+B">Benjamin Beltzung</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Takafumi 
Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Yamamoto%2C+S">Shinya Yamamoto</a>, <a href="/search/cs?searchtype=author&amp;query=Sueur%2C+C">Cédric Sueur</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.06489v1-abstract-short" style="display: inline;"> Individual identification plays a pivotal role in ecology and ethology, notably as a tool for complex social structures understanding. However, traditional identification methods often involve invasive physical tags and can prove both disruptive for animals and time-intensive for researchers. In recent years, the integration of deep learning in research offered new methodological perspectives thro&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.06489v1-abstract-full').style.display = 'inline'; document.getElementById('2310.06489v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.06489v1-abstract-full" style="display: none;"> Individual identification plays a pivotal role in ecology and ethology, notably as a tool for complex social structures understanding. However, traditional identification methods often involve invasive physical tags and can prove both disruptive for animals and time-intensive for researchers. In recent years, the integration of deep learning in research offered new methodological perspectives through automatization of complex tasks. Harnessing object detection and recognition technologies is increasingly used by researchers to achieve identification on video footage. This study represents a preliminary exploration into the development of a non-invasive tool for face detection and individual identification of Japanese macaques (Macaca fuscata) through deep learning. 
The ultimate goal of this research is, using identifications done on the dataset, to automatically generate a social network representation of the studied population. The current main results are promising: (i) the creation of a Japanese macaques&#39; face detector (Faster-RCNN model), reaching a 82.2% accuracy and (ii) the creation of an individual recognizer for K{ō}jima island macaques population (YOLOv8n model), reaching a 83% accuracy. We also created a K{ō}jima population social network by traditional methods, based on co-occurrences on videos. Thus, we provide a benchmark against which the automatically generated network will be assessed for reliability. These preliminary results are a testament to the potential of this innovative approach to provide the scientific community with a tool for tracking individuals and social network studies in Japanese macaques. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.06489v1-abstract-full').style.display = 'none'; document.getElementById('2310.06489v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.01224">arXiv:2310.01224</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.01224">pdf</a>, <a href="https://arxiv.org/format/2310.01224">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3589132.3625644">10.1145/3589132.3625644 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Revisiting Mobility Modeling with Graph: A Graph Transformer Model for Next Point-of-Interest Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+X">Xiaohang Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Yong%2C+J">Jiawei Yong</a>, <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+C">Chuang Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+R">Renhe Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Fukushima%2C+S">Shintaro Fukushima</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.01224v1-abstract-short" style="display: inline;"> Next Point-of-Interest (POI) recommendation plays a crucial role in urban mobility applications. 
Recently, POI recommendation models based on Graph Neural Networks (GNN) have been extensively studied and achieved, however, the effective incorporation of both spatial and temporal information into such GNN-based models remains challenging. Extracting distinct fine-grained features unique to each pie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.01224v1-abstract-full').style.display = 'inline'; document.getElementById('2310.01224v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.01224v1-abstract-full" style="display: none;"> Next Point-of-Interest (POI) recommendation plays a crucial role in urban mobility applications. Recently, POI recommendation models based on Graph Neural Networks (GNN) have been extensively studied and achieved, however, the effective incorporation of both spatial and temporal information into such GNN-based models remains challenging. Extracting distinct fine-grained features unique to each piece of information is difficult since temporal information often includes spatial information, as users tend to visit nearby POIs. To address the challenge, we propose \textbf{\underline{Mob}}ility \textbf{\underline{G}}raph \textbf{\underline{T}}ransformer (MobGT) that enables us to fully leverage graphs to capture both the spatial and temporal features in users&#39; mobility patterns. MobGT combines individual spatial and temporal graph encoders to capture unique features and global user-location relations. Additionally, it incorporates a mobility encoder based on Graph Transformer to extract higher-order information between POIs. To address the long-tailed problem in spatial-temporal data, MobGT introduces a novel loss function, Tail Loss. Experimental results demonstrate that MobGT outperforms state-of-the-art models on various datasets and metrics, achieving 24\% improvement on average. 
Our codes are available at \url{https://github.com/Yukayo/MobGT}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.01224v1-abstract-full').style.display = 'none'; document.getElementById('2310.01224v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted as a full paper of SIGSPATIAL 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.08934">arXiv:2308.08934</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.08934">pdf</a>, <a href="https://arxiv.org/ps/2308.08934">ps</a>, <a href="https://arxiv.org/format/2308.08934">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Materials Science">cond-mat.mtrl-sci</span> </div> </div> <p class="title is-5 mathjax"> On Data Imbalance in Molecular Property Prediction with Pre-training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Limin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Takashige%2C+S">Shun Takashige</a>, <a href="/search/cs?searchtype=author&amp;query=Taura%2C+K">Kenjiro Taura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.08934v1-abstract-short" style="display: inline;"> Revealing and analyzing the various properties of materials is an essential and critical issue in the development of materials, including batteries, semiconductors, catalysts, and pharmaceuticals. Traditionally, these properties have been determined through theoretical calculations and simulations. However, it is not practical to perform such calculations on every single candidate material. Recent&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08934v1-abstract-full').style.display = 'inline'; document.getElementById('2308.08934v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.08934v1-abstract-full" style="display: none;"> Revealing and analyzing the various properties of materials is an essential and critical issue in the development of materials, including batteries, semiconductors, catalysts, and pharmaceuticals. Traditionally, these properties have been determined through theoretical calculations and simulations. However, it is not practical to perform such calculations on every single candidate material. Recently, a combination method of the theoretical calculation and machine learning has emerged, that involves training machine learning models on a subset of theoretical calculation results to construct a surrogate model that can be applied to the remaining materials. On the other hand, a technique called pre-training is used to improve the accuracy of machine learning models. Pre-training involves training the model on pretext task, which is different from the target task, before training the model on the target task. This process aims to extract the input data features, stabilizing the learning process and improving its accuracy. 
However, in the case of molecular property prediction, there is a strong imbalance in the distribution of input data and features, which may lead to biased learning towards frequently occurring data during pre-training. In this study, we propose an effective pre-training method that addresses the imbalance in input data. We aim to improve the final accuracy by modifying the loss function of the existing representative pre-training method, node masking, to compensate the imbalance. We have investigated and assessed the impact of our proposed imbalance compensation on pre-training and the final prediction accuracy through experiments and evaluations using benchmark of molecular property prediction models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08934v1-abstract-full').style.display = 'none'; document.getElementById('2308.08934v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.08129">arXiv:2308.08129</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.08129">pdf</a>, <a href="https://arxiv.org/format/2308.08129">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> Is Self-Supervised Pretraining Good for Extrapolation in Molecular Property Prediction? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Takashige%2C+S">Shun Takashige</a>, <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Limin Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Taura%2C+K">Kenjiro Taura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.08129v1-abstract-short" style="display: inline;"> The prediction of material properties plays a crucial role in the development and discovery of materials in diverse applications, such as batteries, semiconductors, catalysts, and pharmaceuticals. Recently, there has been a growing interest in employing data-driven approaches by using machine learning technologies, in combination with conventional theoretical calculations. In material science, the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08129v1-abstract-full').style.display = 'inline'; document.getElementById('2308.08129v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.08129v1-abstract-full" style="display: none;"> The prediction of material properties plays a crucial role in the development and discovery of materials in diverse applications, such as batteries, semiconductors, catalysts, and pharmaceuticals. Recently, there has been a growing interest in employing data-driven approaches by using machine learning technologies, in combination with conventional theoretical calculations. 
In material science, the prediction of unobserved values, commonly referred to as extrapolation, is particularly critical for property prediction as it enables researchers to gain insight into materials beyond the limits of available data. However, even with the recent advancements in powerful machine learning models, accurate extrapolation is still widely recognized as a significantly challenging problem. On the other hand, self-supervised pretraining is a machine learning technique where a model is first trained on unlabeled data using relatively simple pretext tasks before being trained on labeled data for target tasks. As self-supervised pretraining can effectively utilize material data without observed property values, it has the potential to improve the model&#39;s extrapolation ability. In this paper, we clarify how such self-supervised pretraining can enhance extrapolation performance. We propose an experimental framework for the demonstration and empirically reveal that while models were unable to accurately extrapolate absolute property values, self-supervised pretraining enables them to learn relative tendencies of unobserved property values and improve extrapolation performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.08129v1-abstract-full').style.display = 'none'; document.getElementById('2308.08129v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.06576">arXiv:2307.06576</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.06576">pdf</a>, <a href="https://arxiv.org/format/2307.06576">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3604915.3608801">10.1145/3604915.3608801 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Going Beyond Local: Global Graph-Enhanced Personalized News Recommendations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yang%2C+B">Boming Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+D">Dairui Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+R">Ruihai Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+I">Irene Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.06576v5-abstract-short" style="display: inline;"> Precisely recommending candidate news articles to users has always been a core challenge for personalized news recommendation systems. 
Most recent works primarily focus on using advanced natural language processing techniques to extract semantic information from rich textual data, employing content-based methods derived from local historical news. However, this approach lacks a global perspective,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.06576v5-abstract-full').style.display = 'inline'; document.getElementById('2307.06576v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.06576v5-abstract-full" style="display: none;"> Precisely recommending candidate news articles to users has always been a core challenge for personalized news recommendation systems. Most recent works primarily focus on using advanced natural language processing techniques to extract semantic information from rich textual data, employing content-based methods derived from local historical news. However, this approach lacks a global perspective, failing to account for users&#39; hidden motivations and behaviors beyond semantic information. To address this challenge, we propose a novel model called GLORY (Global-LOcal news Recommendation sYstem), which combines global representations learned from other users with local representations to enhance personalized recommendation systems. We accomplish this by constructing a Global-aware Historical News Encoder, which includes a global news graph and employs gated graph neural networks to enrich news representations, thereby fusing historical news representations by a historical news aggregator. Similarly, we extend this approach to a Global Candidate News Encoder, utilizing a global entity graph and a candidate news aggregator to enhance candidate news representation. Evaluation results on two public news datasets demonstrate that our method outperforms existing approaches. Furthermore, our model offers more diverse recommendations. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.06576v5-abstract-full').style.display = 'none'; document.getElementById('2307.06576v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Recsys 2023, Best Student Paper</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.09105">arXiv:2304.09105</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.09105">pdf</a>, <a href="https://arxiv.org/ps/2304.09105">ps</a>, <a href="https://arxiv.org/format/2304.09105">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Exploring 360-Degree View of Customers for Lookalike Modeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Rahman%2C+M+M">Md Mostafizur Rahman</a>, <a href="/search/cs?searchtype=author&amp;query=Kikuta%2C+D">Daisuke Kikuta</a>, <a href="/search/cs?searchtype=author&amp;query=Abrol%2C+S">Satyen Abrol</a>, <a href="/search/cs?searchtype=author&amp;query=Hirate%2C+Y">Yu Hirate</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Loyola%2C+P">Pablo 
Loyola</a>, <a href="/search/cs?searchtype=author&amp;query=Ebisu%2C+T">Takuma Ebisu</a>, <a href="/search/cs?searchtype=author&amp;query=Kondapaka%2C+M">Manoj Kondapaka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.09105v1-abstract-short" style="display: inline;"> Lookalike models are based on the assumption that user similarity plays an important role towards product selling and enhancing the existing advertising campaigns from a very large user base. Challenges associated to these models reside on the heterogeneity of the user base and its sparsity. In this work, we propose a novel framework that unifies the customers different behaviors or features such&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.09105v1-abstract-full').style.display = 'inline'; document.getElementById('2304.09105v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.09105v1-abstract-full" style="display: none;"> Lookalike models are based on the assumption that user similarity plays an important role towards product selling and enhancing the existing advertising campaigns from a very large user base. Challenges associated to these models reside on the heterogeneity of the user base and its sparsity. In this work, we propose a novel framework that unifies the customers different behaviors or features such as demographics, buying behaviors on different platforms, customer loyalty behaviors and build a lookalike model to improve customer targeting for Rakuten Group, Inc. Extensive experiments on real e-commerce and travel datasets demonstrate the effectiveness of our proposed lookalike model for user targeting task. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.09105v1-abstract-full').style.display = 'none'; document.getElementById('2304.09105v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> SIGIR 2023 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.12929">arXiv:2301.12929</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2301.12929">pdf</a>, <a href="https://arxiv.org/format/2301.12929">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Algebraic Topology">math.AT</span> </div> </div> <p class="title is-5 mathjax"> Can Persistent Homology provide an efficient alternative for Evaluation of Knowledge Graph Completion Methods? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bastos%2C+A">Anson Bastos</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+K">Kuldeep Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Nadgeri%2C+A">Abhishek Nadgeri</a>, <a href="/search/cs?searchtype=author&amp;query=Hoffart%2C+J">Johannes Hoffart</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+M">Manish Singh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.12929v2-abstract-short" style="display: inline;"> In this paper we present a novel method, $\textit{Knowledge Persistence}$ ($\mathcal{KP}$), for faster evaluation of Knowledge Graph (KG) completion approaches. Current ranking-based evaluation is quadratic in the size of the KG, leading to long evaluation times and consequently a high carbon footprint. $\mathcal{KP}$ addresses this by representing the topology of the KG completion methods through&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.12929v2-abstract-full').style.display = 'inline'; document.getElementById('2301.12929v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.12929v2-abstract-full" style="display: none;"> In this paper we present a novel method, $\textit{Knowledge Persistence}$ ($\mathcal{KP}$), for faster evaluation of Knowledge Graph (KG) completion approaches. Current ranking-based evaluation is quadratic in the size of the KG, leading to long evaluation times and consequently a high carbon footprint. 
$\mathcal{KP}$ addresses this by representing the topology of the KG completion methods through the lens of topological data analysis, concretely using persistent homology. The characteristics of persistent homology allow $\mathcal{KP}$ to evaluate the quality of the KG completion looking only at a fraction of the data. Experimental results on standard datasets show that the proposed metric is highly correlated with ranking metrics (Hits@N, MR, MRR). Performance evaluation shows that $\mathcal{KP}$ is computationally efficient: In some cases, the evaluation time (validation+test) of a KG completion method has been reduced from 18 hours (using Hits@10) to 27 seconds (using $\mathcal{KP}$), and on average (across methods &amp; data) reduces the evaluation time (validation+test) by $\approx$ $\textbf{99.96}\%$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.12929v2-abstract-full').style.display = 'none'; document.getElementById('2301.12929v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in proceedings of The Web Conference 2023 (WWW&#39;23)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.05989">arXiv:2212.05989</a> <span>&nbsp;&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> MegaCRN: Meta-Graph Convolutional Recurrent Network for Spatio-Temporal Modeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+R">Renhe Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhaonan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yong%2C+J">Jiawei Yong</a>, <a href="/search/cs?searchtype=author&amp;query=Jeph%2C+P">Puneet Jeph</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Quanjun Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Kobayashi%2C+Y">Yasumasa Kobayashi</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+X">Xuan Song</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Fukushima%2C+S">Shintaro Fukushima</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.05989v2-abstract-short" style="display: inline;"> Spatio-temporal modeling as a canonical task of multivariate time series forecasting has been a significant research topic in AI community. 
a large-scale spatio-temporal dataset that contains a variety of non-stationary phenomena.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.05989v2-abstract-full').style.display = 'none'; document.getElementById('2212.05989v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Rejected by AIJ. We withdraw for now and shall further work on the manuscript</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.14701">arXiv:2211.14701</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.14701">pdf</a>, <a href="https://arxiv.org/format/2211.14701">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Spatio-Temporal Meta-Graph Learning for Traffic Forecasting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+R">Renhe Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Z">Zhaonan Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Yong%2C+J">Jiawei Yong</a>, <a href="/search/cs?searchtype=author&amp;query=Jeph%2C+P">Puneet Jeph</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Q">Quanjun Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Kobayashi%2C+Y">Yasumasa Kobayashi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Song%2C+X">Xuan Song</a>, <a href="/search/cs?searchtype=author&amp;query=Fukushima%2C+S">Shintaro Fukushima</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.14701v4-abstract-short" style="display: inline;"> Traffic forecasting as a canonical task of multivariate time series forecasting has been a significant research topic in AI community. To address the spatio-temporal heterogeneity and non-stationarity implied in the traffic stream, in this study, we propose Spatio-Temporal Meta-Graph Learning as a novel Graph Structure Learning mechanism on spatio-temporal data. Specifically, we implement this ide&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.14701v4-abstract-full').style.display = 'inline'; document.getElementById('2211.14701v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.14701v4-abstract-full" style="display: none;"> Traffic forecasting as a canonical task of multivariate time series forecasting has been a significant research topic in AI community. To address the spatio-temporal heterogeneity and non-stationarity implied in the traffic stream, in this study, we propose Spatio-Temporal Meta-Graph Learning as a novel Graph Structure Learning mechanism on spatio-temporal data. Specifically, we implement this idea into Meta-Graph Convolutional Recurrent Network (MegaCRN) by plugging the Meta-Graph Learner powered by a Meta-Node Bank into GCRN encoder-decoder. We conduct a comprehensive evaluation on two benchmark datasets (i.e., METR-LA and PEMS-BAY) and a new large-scale traffic speed dataset called EXPY-TKY that covers 1843 expressway road links in Tokyo. 
Our model outperformed the state-of-the-arts on all three datasets. Besides, through a series of qualitative evaluations, we demonstrate that our model can explicitly disentangle the road links and time slots with different patterns and be robustly adaptive to any anomalous traffic situations. Codes and datasets are available at https://github.com/deepkashiwa20/MegaCRN. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.14701v4-abstract-full').style.display = 'none'; document.getElementById('2211.14701v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by AAAI 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.11979">arXiv:2211.11979</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.11979">pdf</a>, <a href="https://arxiv.org/format/2211.11979">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Learnable Spectral Wavelets on Dynamic Graphs to Capture Global Interactions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bastos%2C+A">Anson Bastos</a>, <a 
href="/search/cs?searchtype=author&amp;query=Nadgeri%2C+A">Abhishek Nadgeri</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+K">Kuldeep Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+M">Manish Singh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.11979v1-abstract-short" style="display: inline;"> Learning on evolving(dynamic) graphs has caught the attention of researchers as static methods exhibit limited performance in this setting. The existing methods for dynamic graphs learn spatial features by local neighborhood aggregation, which essentially only captures the low pass signals and local interactions. In this work, we go beyond current approaches to incorporate global features for effe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.11979v1-abstract-full').style.display = 'inline'; document.getElementById('2211.11979v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.11979v1-abstract-full" style="display: none;"> Learning on evolving(dynamic) graphs has caught the attention of researchers as static methods exhibit limited performance in this setting. The existing methods for dynamic graphs learn spatial features by local neighborhood aggregation, which essentially only captures the low pass signals and local interactions. In this work, we go beyond current approaches to incorporate global features for effectively learning representations of a dynamically evolving graph. We propose to do so by capturing the spectrum of the dynamic graph. 
we propose a novel approach to learn the graph wavelets to capture this evolving spectrum.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication in AAAI 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.12102">arXiv:2205.12102</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.12102">pdf</a>, <a href="https://arxiv.org/format/2205.12102">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> KQGC: Knowledge Graph Embedding with Smoothing Effects of Graph Convolutions for Recommendation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kikuta%2C+D">Daisuke Kikuta</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Rahman%2C+M+M">Md Mostafizur Rahman</a>, <a href="/search/cs?searchtype=author&amp;query=Hirate%2C+Y">Yu Hirate</a>, <a href="/search/cs?searchtype=author&amp;query=Abrol%2C+S">Satyen Abrol</a>, <a href="/search/cs?searchtype=author&amp;query=Kondapaka%2C+M">Manoj Kondapaka</a>, <a href="/search/cs?searchtype=author&amp;query=Ebisu%2C+T">Takuma Ebisu</a>, <a href="/search/cs?searchtype=author&amp;query=Loyola%2C+P">Pablo Loyola</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.12102v1-abstract-short" style="display: inline;"> Leveraging graphs on recommender systems has gained popularity with the development of graph representation learning (GRL). 
In contrast to existing KG-GNNs, KQGC focuses on the smoothing,
Extensive experiments on a real E-commerce dataset demonstrate the effectiveness of KQGC. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.12102v1-abstract-full').style.display = 'none'; document.getElementById('2205.12102v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.14188">arXiv:2203.14188</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.14188">pdf</a>, <a href="https://arxiv.org/ps/2203.14188">ps</a>, <a href="https://arxiv.org/format/2203.14188">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> mdx: A Cloud Platform for Supporting Data Science and Cross-Disciplinary Research Collaborations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Sugiki%2C+A">Akiyoshi Sugiki</a>, <a href="/search/cs?searchtype=author&amp;query=Takizawa%2C+H">Hiroyuki Takizawa</a>, <a href="/search/cs?searchtype=author&amp;query=Imakura%2C+A">Akira Imakura</a>, 
<a href="/search/cs?searchtype=author&amp;query=Nakamura%2C+H">Hiroshi Nakamura</a>, <a href="/search/cs?searchtype=author&amp;query=Taura%2C+K">Kenjiro Taura</a>, <a href="/search/cs?searchtype=author&amp;query=Kudoh%2C+T">Tomohiro Kudoh</a>, <a href="/search/cs?searchtype=author&amp;query=Hanawa%2C+T">Toshihiro Hanawa</a>, <a href="/search/cs?searchtype=author&amp;query=Sekiya%2C+Y">Yuji Sekiya</a>, <a href="/search/cs?searchtype=author&amp;query=Kobayashi%2C+H">Hiroki Kobayashi</a>, <a href="/search/cs?searchtype=author&amp;query=Matsushima%2C+S">Shin Matsushima</a>, <a href="/search/cs?searchtype=author&amp;query=Kuga%2C+Y">Yohei Kuga</a>, <a href="/search/cs?searchtype=author&amp;query=Nakamura%2C+R">Ryo Nakamura</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+R">Renhe Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Kawase%2C+J">Junya Kawase</a>, <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Miyazaki%2C+H">Hiroshi Miyazaki</a>, <a href="/search/cs?searchtype=author&amp;query=Ishizaki%2C+T">Tsutomu Ishizaki</a>, <a href="/search/cs?searchtype=author&amp;query=Shimotoku%2C+D">Daisuke Shimotoku</a>, <a href="/search/cs?searchtype=author&amp;query=Miyamoto%2C+D">Daisuke Miyamoto</a>, <a href="/search/cs?searchtype=author&amp;query=Aida%2C+K">Kento Aida</a>, <a href="/search/cs?searchtype=author&amp;query=Takefusa%2C+A">Atsuko Takefusa</a>, <a href="/search/cs?searchtype=author&amp;query=Kurimoto%2C+T">Takashi Kurimoto</a>, <a href="/search/cs?searchtype=author&amp;query=Sasayama%2C+K">Koji Sasayama</a>, <a href="/search/cs?searchtype=author&amp;query=Kitagawa%2C+N">Naoya Kitagawa</a> , et al. 
(8 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.14188v1-abstract-short" style="display: inline;"> The growing amount of data and advances in data science have created a need for a new kind of cloud platform that provides users with flexibility, strong security, and the ability to couple with supercomputers and edge devices through high-performance networks. We have built such a nation-wide cloud platform, called &#34;mdx&#34; to meet this need. The mdx platform&#39;s virtualization service, jointly operat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.14188v1-abstract-full').style.display = 'inline'; document.getElementById('2203.14188v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.14188v1-abstract-full" style="display: none;"> The growing amount of data and advances in data science have created a need for a new kind of cloud platform that provides users with flexibility, strong security, and the ability to couple with supercomputers and edge devices through high-performance networks. We have built such a nation-wide cloud platform, called &#34;mdx&#34; to meet this need. The mdx platform&#39;s virtualization service, jointly operated by 9 national universities and 2 national research institutes in Japan, launched in 2021, and more features are in development. Currently mdx is used by researchers in a wide variety of domains, including materials informatics, geo-spatial information science, life science, astronomical science, economics, social science, and computer science. This paper provides an overview of the mdx platform, details the motivation for its development, reports its current status, and outlines its future plans. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.14188v1-abstract-full').style.display = 'none'; document.getElementById('2203.14188v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.12363">arXiv:2203.12363</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.12363">pdf</a>, <a href="https://arxiv.org/format/2203.12363">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Ethereum Fraud Detection with Heterogeneous Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xin Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Hirofuchi%2C+T">Takahiro Hirofuchi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.12363v3-abstract-short" style="display: inline;"> While transactions with cryptocurrencies such as Ethereum are becoming more prevalent, fraud and other criminal transactions are not 
uncommon. Graph analysis algorithms and machine learning techniques detect suspicious transactions that lead to phishing in large transaction networks. Many graph neural network (GNN) models have been proposed to apply deep learning techniques to graph structures. Al&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.12363v3-abstract-full').style.display = 'inline'; document.getElementById('2203.12363v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.12363v3-abstract-full" style="display: none;"> While transactions with cryptocurrencies such as Ethereum are becoming more prevalent, fraud and other criminal transactions are not uncommon. Graph analysis algorithms and machine learning techniques detect suspicious transactions that lead to phishing in large transaction networks. Many graph neural network (GNN) models have been proposed to apply deep learning techniques to graph structures. Although there is research on phishing detection using GNN models in the Ethereum transaction network, models that address the scale of the number of vertices and edges and the imbalance of labels have not yet been studied. In this paper, we compared the model performance of GNN models on the actual Ethereum transaction network dataset and phishing reported label data to exhaustively compare and verify which GNN models and hyperparameters produce the best accuracy. Specifically, we evaluated the model performance of representative homogeneous GNN models which consider single-type nodes and edges and heterogeneous GNN models which support different types of nodes and edges. We showed that heterogeneous models had better model performance than homogeneous models. In particular, the RGCN model achieved the best performance in the overall metrics. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.12363v3-abstract-full').style.display = 'none'; document.getElementById('2203.12363v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 5 figures, Accepted to KDD&#39;22 Workshop on Mining and Learning with Graphs</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.09332">arXiv:2201.09332</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.09332">pdf</a>, <a href="https://arxiv.org/format/2201.09332">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> How Expressive are Transformers in Spectral Domain for Graphs? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bastos%2C+A">Anson Bastos</a>, <a href="/search/cs?searchtype=author&amp;query=Nadgeri%2C+A">Abhishek Nadgeri</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+K">Kuldeep Singh</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Mulang%27%2C+I+O">Isaiah Onando Mulang&#39;</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.09332v4-abstract-short" style="display: inline;"> The recent works proposing transformer-based models for graphs have proven the inadequacy of Vanilla Transformer for graph representation learning. To understand this inadequacy, there is a need to investigate if spectral analysis of the transformer will reveal insights into its expressive power. Similar studies already established that spectral analysis of Graph neural networks (GNNs) provides ex&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.09332v4-abstract-full').style.display = 'inline'; document.getElementById('2201.09332v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.09332v4-abstract-full" style="display: none;"> The recent works proposing transformer-based models for graphs have proven the inadequacy of Vanilla Transformer for graph representation learning. To understand this inadequacy, there is a need to investigate if spectral analysis of the transformer will reveal insights into its expressive power. 
Similar studies already established that spectral analysis of Graph neural networks (GNNs) provides extra perspectives on their expressiveness. In this work, we systematically study and establish the link between the spatial and spectral domain in the realm of the transformer. We further provide a theoretical analysis and prove that the spatial attention mechanism in the transformer cannot effectively capture the desired frequency response, thus, inherently limiting its expressiveness in spectral space. Therefore, we propose FeTA, a framework that aims to perform attention over the entire graph spectrum (i.e., actual frequency components of the graphs) analogous to the attention in spatial space. Empirical results suggest that FeTA provides homogeneous performance gain against vanilla transformer across all tasks on standard benchmarks and can easily be extended to GNN-based models with low-pass characteristics (e.g., GAT). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.09332v4-abstract-full').style.display = 'none'; document.getElementById('2201.09332v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in Transactions on Machine Learning Research</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.07893">arXiv:2109.07893</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.07893">pdf</a>, <a href="https://arxiv.org/format/2109.07893">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3458817.3480858">10.1145/3458817.3480858 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Efficient Scaling of Dynamic Graph Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chakaravarthy%2C+V+T">Venkatesan T. Chakaravarthy</a>, <a href="/search/cs?searchtype=author&amp;query=Pandian%2C+S+S">Shivmaran S. 
Pandian</a>, <a href="/search/cs?searchtype=author&amp;query=Raje%2C+S">Saurabh Raje</a>, <a href="/search/cs?searchtype=author&amp;query=Sabharwal%2C+Y">Yogish Sabharwal</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Ubaru%2C+S">Shashanka Ubaru</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.07893v1-abstract-short" style="display: inline;"> We present distributed algorithms for training dynamic Graph Neural Networks (GNN) on large scale graphs spanning multi-node, multi-GPU systems. To the best of our knowledge, this is the first scaling study on dynamic GNN. We devise mechanisms for reducing the GPU memory usage and identify two execution time bottlenecks: CPU-GPU data transfer; and communication volume. Exploiting properties of dyn&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.07893v1-abstract-full').style.display = 'inline'; document.getElementById('2109.07893v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.07893v1-abstract-full" style="display: none;"> We present distributed algorithms for training dynamic Graph Neural Networks (GNN) on large scale graphs spanning multi-node, multi-GPU systems. To the best of our knowledge, this is the first scaling study on dynamic GNN. We devise mechanisms for reducing the GPU memory usage and identify two execution time bottlenecks: CPU-GPU data transfer; and communication volume. Exploiting properties of dynamic graphs, we design a graph difference-based strategy to significantly reduce the transfer time. We develop a simple, but effective data distribution technique under which the communication volume remains fixed and linear in the input size, for any number of GPUs. 
Our experiments using billion-size graphs on a system of 128 GPUs shows that: (i) the distribution scheme achieves up to 30x speedup on 128 GPUs; (ii) the graph-difference technique reduces the transfer time by a factor of up to 4.1x and the overall execution time by up to 40% <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.07893v1-abstract-full').style.display = 'none'; document.getElementById('2109.07893v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Conference version to appear in the proceedings of SC&#39;21</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6; C.2.4 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.10094">arXiv:2105.10094</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2105.10094">pdf</a>, <a href="https://arxiv.org/ps/2105.10094">ps</a>, <a href="https://arxiv.org/format/2105.10094">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> Finding All Bounded-Length Simple Cycles in a Directed Graph </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+A">Anshul Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.10094v2-abstract-short" style="display: inline;"> A new efficient algorithm is presented for finding all simple cycles that satisfy a length constraint in a directed graph. When the number of vertices is non-trivial, most cycle-finding problems are of practical interest for sparse graphs only. We show that for a class of sparse graphs in which the vertex degrees are almost uniform, our algorithm can find all cycles of length less than or equal to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.10094v2-abstract-full').style.display = 'inline'; document.getElementById('2105.10094v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.10094v2-abstract-full" style="display: none;"> A new efficient algorithm is presented for finding all simple cycles that satisfy a length constraint in a directed graph. When the number of vertices is non-trivial, most cycle-finding problems are of practical interest for sparse graphs only. We show that for a class of sparse graphs in which the vertex degrees are almost uniform, our algorithm can find all cycles of length less than or equal to $k$ in $O((c+n)(k-1)d^k)$ steps, where $n$ is the number of vertices, $c$ is the total number of cycles discovered, $d$ is the average degree of the graph&#39;s vertices, and $k &gt; 1$. While our analysis for the running time addresses only a class of sparse graphs, we provide empirical and experimental evidence of the efficiency of the algorithm for general sparse graphs. This algorithm is a significant improvement over the only other deterministic algorithm for this problem known to us; it also lends itself to massive parallelism. Experimental results of a serial implementation on some large real-world graphs are presented. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.10094v2-abstract-full').style.display = 'none'; document.getElementById('2105.10094v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> G.2.2; I.1.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.14620">arXiv:2103.14620</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.14620">pdf</a>, <a href="https://arxiv.org/format/2103.14620">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> LiGCN: Label-interpretable Graph Convolutional Networks for Multi-label Text Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+I">Irene Li</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+A">Aosong Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+H">Hao Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+T">Tianxiao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+R">Ruihai Dong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.14620v2-abstract-short" style="display: 
inline;"> Multi-label text classification (MLTC) is an attractive and challenging task in natural language processing (NLP). Compared with single-label text classification, MLTC has a wider range of applications in practice. In this paper, we propose a label-interpretable graph convolutional network model to solve the MLTC problem by modeling tokens and labels as nodes in a heterogeneous graph. In this way,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.14620v2-abstract-full').style.display = 'inline'; document.getElementById('2103.14620v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.14620v2-abstract-full" style="display: none;"> Multi-label text classification (MLTC) is an attractive and challenging task in natural language processing (NLP). Compared with single-label text classification, MLTC has a wider range of applications in practice. In this paper, we propose a label-interpretable graph convolutional network model to solve the MLTC problem by modeling tokens and labels as nodes in a heterogeneous graph. In this way, we are able to take into account multiple relationships including token-level relationships. Besides, the model allows better interpretability for predicted labels as the token-label edges are exposed. We evaluate our method on four real-world datasets and it achieves competitive scores against selected baseline methods. Specifically, this model achieves a gain of 0.14 on the F1 score in the small label set MLTC, and 0.07 in the large label set scenario. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.14620v2-abstract-full').style.display = 'none'; document.getElementById('2103.14620v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 tables, 3 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> DLG4NLP Workshop, NAACL 2022 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.07026">arXiv:2101.07026</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.07026">pdf</a>, <a href="https://arxiv.org/ps/2101.07026">ps</a>, <a href="https://arxiv.org/format/2101.07026">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Time-Efficient and High-Quality Graph Partitioning for Graph Dynamic Scaling </p> <p class="authors"> <span 
class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Tziritas%2C+N">Nikos Tziritas</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Cai%2C+W">Wentong Cai</a>, <a href="/search/cs?searchtype=author&amp;query=Theodoropoulos%2C+G">Georgios Theodoropoulos</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.07026v1-abstract-short" style="display: inline;"> The dynamic scaling of distributed computations plays an important role in the utilization of elastic computational resources, such as the cloud. It enables the provisioning and de-provisioning of resources to match dynamic resource availability and demands. In the case of distributed graph processing, changing the number of the graph partitions while maintaining high partitioning quality imposes&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.07026v1-abstract-full').style.display = 'inline'; document.getElementById('2101.07026v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.07026v1-abstract-full" style="display: none;"> The dynamic scaling of distributed computations plays an important role in the utilization of elastic computational resources, such as the cloud. It enables the provisioning and de-provisioning of resources to match dynamic resource availability and demands. In the case of distributed graph processing, changing the number of the graph partitions while maintaining high partitioning quality imposes serious computational overheads as typically a time-consuming graph partitioning algorithm needs to execute each time repartitioning is required. 
In this paper, we propose a dynamic scaling method that can efficiently change the number of graph partitions while keeping its quality high. Our idea is based on two techniques: preprocessing and very fast edge partitioning, called graph edge ordering and chunk-based edge partitioning, respectively. The former converts the graph data into an ordered edge list in such a way that edges with high locality are closer to each other. The latter immediately divides the ordered edge list into an arbitrary number of high-quality partitions. The evaluation with the real-world billion-scale graphs demonstrates that our proposed approach significantly reduces the repartitioning time, while the partitioning quality it achieves is on par with that of the best existing static method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.07026v1-abstract-full').style.display = 'none'; document.getElementById('2101.07026v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 15 figures. 
Under review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.05573">arXiv:2006.05573</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.05573">pdf</a>, <a href="https://arxiv.org/format/2006.05573">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> Global Data Science Project for COVID-19 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Napagao%2C+S+A">Sergio Alvarez Napagao</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+I">Irene Li</a>, <a href="/search/cs?searchtype=author&amp;query=Maruyama%2C+H">Hiroshi Maruyama</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=P%27erez-Arnal%2C+R">Raquel P&#39;erez-Arnal</a>, <a href="/search/cs?searchtype=author&amp;query=Miyoshi%2C+K">Kunihiko Miyoshi</a>, <a href="/search/cs?searchtype=author&amp;query=Ishii%2C+E">Euma Ishii</a>, <a href="/search/cs?searchtype=author&amp;query=Suzuki%2C+K">Keita Suzuki</a>, <a href="/search/cs?searchtype=author&amp;query=Shiba%2C+S">Sayaka Shiba</a>, <a href="/search/cs?searchtype=author&amp;query=Kurokawa%2C+M">Mariko Kurokawa</a>, <a href="/search/cs?searchtype=author&amp;query=Kanzawa%2C+Y">Yuta Kanzawa</a>, <a 
href="/search/cs?searchtype=author&amp;query=Nakagawa%2C+N">Naomi Nakagawa</a>, <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yixin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+T">Tianxiao Li</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.05573v2-abstract-short" style="display: inline;"> This paper aims at providing the summary of the Global Data Science Project (GDSC) for COVID-19, as of May 31, 2020. COVID-19 has largely impacted on our societies through both direct and indirect effects transmitted by the policy measures to counter the spread of viruses. We quantitatively analysed the multifaceted impacts of the COVID-19 pandemic on our societies including people&#39;s mobility, heal&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.05573v2-abstract-full').style.display = 'inline'; document.getElementById('2006.05573v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.05573v2-abstract-full" style="display: none;"> This paper aims at providing the summary of the Global Data Science Project (GDSC) for COVID-19, as of May 31, 2020. COVID-19 has largely impacted on our societies through both direct and indirect effects transmitted by the policy measures to counter the spread of viruses. We quantitatively analysed the multifaceted impacts of the COVID-19 pandemic on our societies including people&#39;s mobility, health, and social behaviour changes. People&#39;s mobility has changed significantly due to the implementation of travel restriction and quarantine measurements. Indeed, the physical distance has widened at international (cross-border), national and regional level. 
At international level, due to the travel restrictions, the number of international flights has plunged overall at around 88 percent during March. In particular, the number of flights connecting Europe dropped drastically in mid of March after the United States announced travel restrictions to Europe and the EU and participating countries agreed to close borders, at 84 percent decline compared to March 10th. Similarly, we examined the impacts of quarantine measures in the major city: Tokyo (Japan), New York City (the United States), and Barcelona (Spain). Within all three cities, we found the significant decline in traffic volume. We also identified the increased concern for mental health through the analysis of posts on social networking services such as Twitter and Instagram. Notably, in the beginning of April 2020, the number of post with #depression on Instagram doubled, which might reflect the rise in mental health awareness among Instagram users. Besides, we identified the changes in a wide range of people&#39;s social behaviors, as well as economic impacts through the analysis of Instagram data and primary survey data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.05573v2-abstract-full').style.display = 'none'; document.getElementById('2006.05573v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">42 pages, 49 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.02950">arXiv:2006.02950</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2006.02950">pdf</a>, <a href="https://arxiv.org/format/2006.02950">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> The Impact of COVID-19 on Flight Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Dholakia%2C+M">Mishal Dholakia</a>, <a href="/search/cs?searchtype=author&amp;query=Ishii%2C+E">Euma Ishii</a>, <a href="/search/cs?searchtype=author&amp;query=Napagao%2C+S+A">Sergio Alvarez Napagao</a>, <a href="/search/cs?searchtype=author&amp;query=P%C3%A9rez-Arnal%2C+R">Raquel Pérez-Arnal</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Murofushi%2C+T">Toshiaki Murofushi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.02950v3-abstract-short" style="display: inline;"> As COVID-19 transmissions spread worldwide, governments have 
announced and enforced travel restrictions to prevent further infections. Such restrictions have a direct effect on the volume of international flights among these countries, resulting in extensive social and economic costs. To better understand the situation in a quantitative manner, we used the Opensky network data to clarify flight pa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.02950v3-abstract-full').style.display = 'inline'; document.getElementById('2006.02950v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.02950v3-abstract-full" style="display: none;"> As COVID-19 transmissions spread worldwide, governments have announced and enforced travel restrictions to prevent further infections. Such restrictions have a direct effect on the volume of international flights among these countries, resulting in extensive social and economic costs. To better understand the situation in a quantitative manner, we used the Opensky network data to clarify flight patterns and flight densities around the world and observe relationships between flight numbers with new infections, and with the economy (unemployment rate) in Barcelona. We found that the number of daily flights gradually decreased and suddenly dropped 64% during the second half of March in 2020 after the US and Europe enacted travel restrictions. We also observed a 51% decrease in the global flight network density decreased during this period. Regarding new COVID-19 cases, the world had an unexpected surge regardless of travel restrictions. Finally, the layoffs for temporary workers in the tourism and airplane business increased by 4.3 fold in the weeks following Spain&#39;s decision to close its borders. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.02950v3-abstract-full').style.display = 'none'; document.getElementById('2006.02950v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 42 figures. Toyotaro Suzumura and Hiroki Kanezashi contributed equally to this work</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.12873">arXiv:2005.12873</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2005.12873">pdf</a>, <a href="https://arxiv.org/format/2005.12873">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Performance">cs.PF</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Benchmarking Graph Data Management and Processing Systems: A Survey </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dayarathna%2C+M">Miyuru Dayarathna</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.12873v4-abstract-short" style="display: inline;"> The development of scalable, representative, and widely adopted benchmarks for graph data systems have been a question for which answers has been sought for decades. We conduct an in-depth study of the existing literature on benchmarks for graph data management and processing, covering 20 different benchmarks developed during the last 15 years. We categorize the benchmarks into three areas focusin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.12873v4-abstract-full').style.display = 'inline'; document.getElementById('2005.12873v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.12873v4-abstract-full" style="display: none;"> The development of scalable, representative, and widely adopted benchmarks for graph data systems have been a question for which answers has been sought for decades. We conduct an in-depth study of the existing literature on benchmarks for graph data management and processing, covering 20 different benchmarks developed during the last 15 years. We categorize the benchmarks into three areas focusing on benchmarks for graph processing systems, graph database benchmarks, and bigdata benchmarks with graph processing workloads. This systematic approach allows us to identify multiple issues existing in this area, including i) few benchmarks exist which can produce high workload scenarios, ii) no significant work done on benchmarking graph stream processing as well as graph based machine learning, iii) benchmarks tend to use conventional metrics despite new meaningful metrics have been around for years, iv) increasing number of big data benchmarks appear with graph processing workloads. 
Following these observations, we conclude the survey by describing key challenges for future research on graph data systems benchmarking. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.12873v4-abstract-full').style.display = 'none'; document.getElementById('2005.12873v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">26 pages, 5 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> A.1; E.1; H.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.10899">arXiv:2004.10899</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2004.10899">pdf</a>, <a href="https://arxiv.org/format/2004.10899">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> What are We Depressed about When We Talk about COVID19: Mental Health Analysis on Tweets Using Natural Language Processing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+I">Irene Li</a>, <a 
href="/search/cs?searchtype=author&amp;query=Li%2C+Y">Yixin Li</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+T">Tianxiao Li</a>, <a href="/search/cs?searchtype=author&amp;query=Alvarez-Napagao%2C+S">Sergio Alvarez-Napagao</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.10899v3-abstract-short" style="display: inline;"> The outbreak of coronavirus disease 2019 (COVID-19) recently has affected human life to a great extent. Besides direct physical and economic threats, the pandemic also indirectly impact people&#39;s mental health conditions, which can be overwhelming but difficult to measure. The problem may come from various reasons such as unemployment status, stay-at-home policy, fear for the virus, and so forth. I&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.10899v3-abstract-full').style.display = 'inline'; document.getElementById('2004.10899v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.10899v3-abstract-full" style="display: none;"> The outbreak of coronavirus disease 2019 (COVID-19) recently has affected human life to a great extent. Besides direct physical and economic threats, the pandemic also indirectly impact people&#39;s mental health conditions, which can be overwhelming but difficult to measure. The problem may come from various reasons such as unemployment status, stay-at-home policy, fear for the virus, and so forth. In this work, we focus on applying natural language processing (NLP) techniques to analyze tweets in terms of mental health. 
We trained deep models that classify each tweet into the following emotions: anger, anticipation, disgust, fear, joy, sadness, surprise and trust. We build the EmoCT (Emotion-Covid19-Tweet) dataset for the training purpose by manually labeling 1,000 English tweets. Furthermore, we propose and compare two methods to find out the reasons that are causing sadness and fear. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.10899v3-abstract-full').style.display = 'none'; document.getElementById('2004.10899v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1912.07701">arXiv:1912.07701</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1912.07701">pdf</a>, <a href="https://arxiv.org/ps/1912.07701">ps</a>, <a href="https://arxiv.org/format/1912.07701">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="General Finance">q-fin.GN</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" 
data-tooltip="Statistical Finance">q-fin.ST</span> </div> </div> <p class="title is-5 mathjax"> Exploring Multi-Banking Customer-to-Customer Relations in AML Context with Poincaré Embeddings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Stavarache%2C+L+L">Lucia Larise Stavarache</a>, <a href="/search/cs?searchtype=author&amp;query=Narbutis%2C+D">Donatas Narbutis</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Harishankar%2C+R">Ray Harishankar</a>, <a href="/search/cs?searchtype=author&amp;query=%C5%BDaltauskas%2C+A">Augustas Žaltauskas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1912.07701v2-abstract-short" style="display: inline;"> In the recent years money laundering schemes have grown in complexity and speed of realization, affecting financial institutions and millions of customers globally. Strengthened privacy policies, along with in-country regulations, make it hard for banks to inner- and cross-share, and report suspicious activities for the AML (Anti-Money Laundering) measures. Existing topologies and models for AML a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.07701v2-abstract-full').style.display = 'inline'; document.getElementById('1912.07701v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1912.07701v2-abstract-full" style="display: none;"> In the recent years money laundering schemes have grown in complexity and speed of realization, affecting financial institutions and millions of customers globally. 
Strengthened privacy policies, along with in-country regulations, make it hard for banks to inner- and cross-share, and report suspicious activities for the AML (Anti-Money Laundering) measures. Existing topologies and models for AML analysis and information sharing are subject to major limitations, such as compliance with regulatory constraints, extended infrastructure to run high-computation algorithms, data quality and span, proving cumbersome and costly to execute, federate, and interpret. This paper proposes a new topology for exploring multi-banking customer social relations in AML context -- customer-to-customer, customer-to-transaction, and transaction-to-transaction -- using a 3D modeling topological algebra formulated through Poincaré embeddings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.07701v2-abstract-full').style.display = 'none'; document.getElementById('1912.07701v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 December, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2019 Workshop on Robust AI in Financial Services (https://sites.google.com/view/robust-ai-in-fs-2019)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.12946">arXiv:1909.12946</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1909.12946">pdf</a>, <a href="https://arxiv.org/format/1909.12946">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistical Finance">q-fin.ST</span> </div> </div> <p class="title is-5 mathjax"> Towards Federated Graph Learning for Collaborative Financial Crimes Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yi Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Baracaldo%2C+N">Natahalie Baracaldo</a>, <a href="/search/cs?searchtype=author&amp;query=Ye%2C+G">Guangnan Ye</a>, <a href="/search/cs?searchtype=author&amp;query=Houck%2C+K">Keith Houck</a>, <a href="/search/cs?searchtype=author&amp;query=Kawahara%2C+R">Ryo Kawahara</a>, <a href="/search/cs?searchtype=author&amp;query=Anwar%2C+A">Ali Anwar</a>, <a href="/search/cs?searchtype=author&amp;query=Stavarache%2C+L+L">Lucia Larise Stavarache</a>, 
<a href="/search/cs?searchtype=author&amp;query=Watanabe%2C+Y">Yuji Watanabe</a>, <a href="/search/cs?searchtype=author&amp;query=Loyola%2C+P">Pablo Loyola</a>, <a href="/search/cs?searchtype=author&amp;query=Klyashtorny%2C+D">Daniel Klyashtorny</a>, <a href="/search/cs?searchtype=author&amp;query=Ludwig%2C+H">Heiko Ludwig</a>, <a href="/search/cs?searchtype=author&amp;query=Bhaskaran%2C+K">Kumar Bhaskaran</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.12946v2-abstract-short" style="display: inline;"> Financial crime is a large and growing problem, in some way touching almost every financial institution. Financial institutions are the front line in the war against financial crime and accordingly, must devote substantial human and technology resources to this effort. Current processes to detect financial misconduct have limitations in their ability to effectively differentiate between malicious&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.12946v2-abstract-full').style.display = 'inline'; document.getElementById('1909.12946v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.12946v2-abstract-full" style="display: none;"> Financial crime is a large and growing problem, in some way touching almost every financial institution. Financial institutions are the front line in the war against financial crime and accordingly, must devote substantial human and technology resources to this effort. Current processes to detect financial misconduct have limitations in their ability to effectively differentiate between malicious behavior and ordinary financial activity. These limitations tend to result in gross over-reporting of suspicious activity that necessitate time-intensive and costly manual review. 
Advances in technology used in this domain, including machine learning based approaches, can improve upon the effectiveness of financial institutions&#39; existing processes, however, a key challenge that most financial institutions continue to face is that they address financial crimes in isolation without any insight from other firms. Where financial institutions address financial crimes through the lens of their own firm, perpetrators may devise sophisticated strategies that may span across institutions and geographies. Financial institutions continue to work relentlessly to advance their capabilities, forming partnerships across institutions to share insights, patterns and capabilities. These public-private partnerships are subject to stringent regulatory and data privacy requirements, thereby making it difficult to rely on traditional technology solutions. In this paper, we propose a methodology to share key information across institutions by using a federated graph learning platform that enables us to build more accurate machine learning models by leveraging federated learning and also graph learning approaches. We demonstrated that our federated model outperforms local model by 20% with the UK FCA TechSprint data set. This new platform opens up a door to efficiently detecting global money laundering activity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.12946v2-abstract-full').style.display = 'none'; document.getElementById('1909.12946v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.10660">arXiv:1909.10660</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1909.10660">pdf</a>, <a href="https://arxiv.org/format/1909.10660">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistical Finance">q-fin.ST</span> </div> </div> <p class="title is-5 mathjax"> Exploring Graph Neural Networks for Stock Market Predictions with Rolling Window Analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Matsunaga%2C+D">Daiki Matsunaga</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Takahashi%2C+T">Toshihiro Takahashi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.10660v3-abstract-short" style="display: inline;"> Recently, there has been a surge of interest in the use of machine learning to help aid in the accurate predictions of financial markets. 
Despite the exciting advances in this cross-section of finance and AI, many of the current approaches are limited to using technical analysis to capture historical trends of each stock price and thus limited to certain experimental setups to obtain good predicti&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.10660v3-abstract-full').style.display = 'inline'; document.getElementById('1909.10660v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.10660v3-abstract-full" style="display: none;"> Recently, there has been a surge of interest in the use of machine learning to help aid in the accurate predictions of financial markets. Despite the exciting advances in this cross-section of finance and AI, many of the current approaches are limited to using technical analysis to capture historical trends of each stock price and thus limited to certain experimental setups to obtain good prediction results. On the other hand, professional investors additionally use their rich knowledge of inter-market and inter-company relations to map the connectivity of companies and events, and use this map to make better market predictions. For instance, they would predict the movement of a certain company&#39;s stock price based not only on its former stock price trends but also on the performance of its suppliers or customers, the overall industry, macroeconomic factors and trade policies. This paper investigates the effectiveness of work at the intersection of market predictions and graph neural networks, which hold the potential to mimic the ways in which investors make decisions by incorporating company knowledge graphs directly into the predictive model. The main goal of this work is to test the validity of this approach across different markets and longer time horizons for backtesting using rolling window analysis. 
In this work, we concentrate on the prediction of individual stock prices in the Japanese Nikkei 225 market over a period of roughly 20 years. For the knowledge graph, we use the Nikkei Value Search data, which is a rich dataset showing mainly supplier relations among Japanese and foreign companies. Our preliminary results show a 29.5% increase and a 2.2-fold increase in the return ratio and Sharpe ratio, respectively, when compared to the market benchmark, as well as a 6.32% increase and 1.3-fold increase, respectively, compared to the baseline LSTM model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.10660v3-abstract-full').style.display = 'none'; document.getElementById('1909.10660v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2019 Workshop on Robust AI in Financial Services: Data, Fairness, Explainability, Trustworthiness, and Privacy (Robust AI in FS), Vancouver, Canada</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1908.05855">arXiv:1908.05855</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1908.05855">pdf</a>, <a href="https://arxiv.org/ps/1908.05855">ps</a>, <a href="https://arxiv.org/format/1908.05855">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> </div> </div> <p class="title is-5 mathjax"> Distributed Edge Partitioning for Trillion-edge Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hanai%2C+M">Masatoshi Hanai</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+W+J">Wen Jun Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+E">Elvis Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Theodoropoulos%2C+G">Georgios Theodoropoulos</a>, <a href="/search/cs?searchtype=author&amp;query=Cai%2C+W">Wentong Cai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1908.05855v2-abstract-short" style="display: inline;"> We propose Distributed Neighbor Expansion (Distributed NE), a 
parallel and distributed graph partitioning method that can scale to trillion-edge graphs while providing high partitioning quality. Distributed NE is based on a new heuristic, called parallel expansion, where each partition is constructed in parallel by greedily expanding its edge set from a single vertex in such a way that the increas&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.05855v2-abstract-full').style.display = 'inline'; document.getElementById('1908.05855v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1908.05855v2-abstract-full" style="display: none;"> We propose Distributed Neighbor Expansion (Distributed NE), a parallel and distributed graph partitioning method that can scale to trillion-edge graphs while providing high partitioning quality. Distributed NE is based on a new heuristic, called parallel expansion, where each partition is constructed in parallel by greedily expanding its edge set from a single vertex in such a way that the increase of the vertex cuts becomes local minimal. We theoretically prove that the proposed method has the upper bound in the partitioning quality. The empirical evaluation with various graphs shows that the proposed method produces higher-quality partitions than the state-of-the-art distributed graph partitioning algorithms. The performance evaluation shows that the space efficiency of the proposed method is an order-of-magnitude better than the existing algorithms, keeping its time efficiency comparable. As a result, Distributed NE can partition a trillion-edge graph using only 256 machines within 70 minutes. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1908.05855v2-abstract-full').style.display = 'none'; document.getElementById('1908.05855v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">VLDB 2020, Code in http://www.masahanai.jp/DistributedNE/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1902.10191">arXiv:1902.10191</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1902.10191">pdf</a>, <a href="https://arxiv.org/format/1902.10191">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> EvolveGCN: Evolving Graph Convolutional Networks for Dynamic Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pareja%2C+A">Aldo Pareja</a>, <a href="/search/cs?searchtype=author&amp;query=Domeniconi%2C+G">Giacomo Domeniconi</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jie Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+T">Tengfei Ma</a>, <a 
href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Kaler%2C+T">Tim Kaler</a>, <a href="/search/cs?searchtype=author&amp;query=Schardl%2C+T+B">Tao B. Schardl</a>, <a href="/search/cs?searchtype=author&amp;query=Leiserson%2C+C+E">Charles E. Leiserson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1902.10191v3-abstract-short" style="display: inline;"> Graph representation learning resurges as a trending research subject owing to the widespread use of deep learning for Euclidean data, which inspire various creative designs of neural networks in the non-Euclidean domain, particularly graphs. With the success of these graph neural networks (GNN) in the static setting, we approach further practical scenarios where the graph dynamically evolves. Exi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.10191v3-abstract-full').style.display = 'inline'; document.getElementById('1902.10191v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1902.10191v3-abstract-full" style="display: none;"> Graph representation learning resurges as a trending research subject owing to the widespread use of deep learning for Euclidean data, which inspire various creative designs of neural networks in the non-Euclidean domain, particularly graphs. With the success of these graph neural networks (GNN) in the static setting, we approach further practical scenarios where the graph dynamically evolves. Existing approaches typically resort to node embeddings and use a recurrent neural network (RNN, broadly speaking) to regulate the embeddings and learn the temporal dynamics. 
These methods require the knowledge of a node in the full time span (including both training and testing) and are less applicable to the frequent change of the node set. In some extreme scenarios, the node sets at different time steps may completely differ. To resolve this challenge, we propose EvolveGCN, which adapts the graph convolutional network (GCN) model along the temporal dimension without resorting to node embeddings. The proposed approach captures the dynamism of the graph sequence through using an RNN to evolve the GCN parameters. Two architectures are considered for the parameter evolution. We evaluate the proposed approach on tasks including link prediction, edge classification, and node classification. The experimental results indicate a generally higher performance of EvolveGCN compared with related approaches. The code is available at \url{https://github.com/IBM/EvolveGCN}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.10191v3-abstract-full').style.display = 'none'; document.getElementById('1902.10191v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 February, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">AAAI 2020. 
The code is available at https://github.com/IBM/EvolveGCN</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1812.10321">arXiv:1812.10321</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1812.10321">pdf</a>, <a href="https://arxiv.org/format/1812.10321">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/HiPC.2018.00019">10.1109/HiPC.2018.00019 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Adaptive Pattern Matching with Reinforcement Learning for Dynamic Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Oh%2C+M">Min-hwan Oh</a>, <a href="/search/cs?searchtype=author&amp;query=Matsuoka%2C+S">Satoshi Matsuoka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1812.10321v1-abstract-short" style="display: inline;"> Graph pattern matching algorithms to handle million-scale dynamic graphs are widely used in many applications such as social network analytics and suspicious transaction detections from financial networks. 
On the other hand, the computation complexity of many graph pattern matching algorithms is expensive, and it is not affordable to extract patterns from million-scale graphs. Moreover, most real-&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.10321v1-abstract-full').style.display = 'inline'; document.getElementById('1812.10321v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1812.10321v1-abstract-full" style="display: none;"> Graph pattern matching algorithms to handle million-scale dynamic graphs are widely used in many applications such as social network analytics and suspicious transaction detections from financial networks. On the other hand, the computation complexity of many graph pattern matching algorithms is expensive, and it is not affordable to extract patterns from million-scale graphs. Moreover, most real-world networks are time-evolving, updating their structures continuously, which makes it harder to update and output newly matched patterns in real time. Many incremental graph pattern matching algorithms which reduce the number of updates have been proposed to handle such dynamic graphs. However, it is still challenging to recompute vertices in the incremental graph pattern matching algorithms in a single process, and that prevents the real-time analysis. We propose an incremental graph pattern matching algorithm to deal with time-evolving graph data and also propose an adaptive optimization system based on reinforcement learning to recompute vertices in the incremental process more efficiently. Then we discuss the qualitative efficiency of our system with several types of data graphs and pattern graphs. We evaluate the performance using million-scale attributed and time-evolving social graphs. 
Our incremental algorithm is up to 10.1 times faster than an existing graph pattern matching and 1.95 times faster with the adaptive systems in a computation node than naive incremental processing. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.10321v1-abstract-full').style.display = 'none'; document.getElementById('1812.10321v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 December, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages and 11 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1812.00076">arXiv:1812.00076</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1812.00076">pdf</a>, <a href="https://arxiv.org/ps/1812.00076">ps</a>, <a href="https://arxiv.org/format/1812.00076">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Scalable Graph Learning for Anti-Money Laundering: A First Look </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Weber%2C+M">Mark Weber</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jie Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a 
href="/search/cs?searchtype=author&amp;query=Pareja%2C+A">Aldo Pareja</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+T">Tengfei Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Kaler%2C+T">Tim Kaler</a>, <a href="/search/cs?searchtype=author&amp;query=Leiserson%2C+C+E">Charles E. Leiserson</a>, <a href="/search/cs?searchtype=author&amp;query=Schardl%2C+T+B">Tao B. Schardl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1812.00076v1-abstract-short" style="display: inline;"> Organized crime inflicts human suffering on a genocidal scale: the Mexican drug cartels have murdered 150,000 people since 2006, upwards of 700,000 people per year are &#34;exported&#34; in a human trafficking industry enslaving an estimated 40 million people. These nefarious industries rely on sophisticated money laundering schemes to operate. Despite tremendous resources dedicated to anti-money launderi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.00076v1-abstract-full').style.display = 'inline'; document.getElementById('1812.00076v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1812.00076v1-abstract-full" style="display: none;"> Organized crime inflicts human suffering on a genocidal scale: the Mexican drug cartels have murdered 150,000 people since 2006, upwards of 700,000 people per year are &#34;exported&#34; in a human trafficking industry enslaving an estimated 40 million people. These nefarious industries rely on sophisticated money laundering schemes to operate. Despite tremendous resources dedicated to anti-money laundering (AML) only a tiny fraction of illicit activity is prevented. The research community can help. 
In this brief paper, we map the structural and behavioral dynamics driving the technical challenge. We review AML methods, current and emergent. We provide a first look at scalable graph convolutional neural networks for forensic analysis of financial data, which is massive, dense, and dynamic. We report preliminary experimental results using a large synthetic graph (1M nodes, 9M edges) generated by a data simulator we created called AMLSim. We consider opportunities for high performance efficiency, in terms of computation and memory, and we share results from a simple graph compression experiment. Our results support our working hypothesis that graph deep learning for AML bears great promise in the fight against criminal financial activity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.00076v1-abstract-full').style.display = 'none'; document.getElementById('1812.00076v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2018. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2018 Workshop on Challenges and Opportunities for AI in Financial Services: the Impact of Fairness, Explainability, Accuracy, and Privacy, Montreal, Canada</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1808.06251">arXiv:1808.06251</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1808.06251">pdf</a>, <a href="https://arxiv.org/format/1808.06251">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/BigData.2016.7840991">10.1109/BigData.2016.7840991 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> An incremental local-first community detection method for dynamic graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1808.06251v1-abstract-short" style="display: inline;"> Community detections for large-scale real world networks 
have been more popular in social analytics. In particular, dynamically growing network analyses become important to find long-term trends and detect anomalies. In order to analyze such networks, we need to obtain many snapshots and apply same analytic methods to them. However, it is inefficient to extract communities from these whole newly g&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.06251v1-abstract-full').style.display = 'inline'; document.getElementById('1808.06251v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1808.06251v1-abstract-full" style="display: none;"> Community detections for large-scale real world networks have been more popular in social analytics. In particular, dynamically growing network analyses become important to find long-term trends and detect anomalies. In order to analyze such networks, we need to obtain many snapshots and apply same analytic methods to them. However, it is inefficient to extract communities from these whole newly generated networks with little differences every time, and then it is impossible to follow the network growths in the real time. We proposed an incremental community detection algorithm for high-volume graph streams. It is based on the top of a well-known batch-oriented algorithm named DEMON[1]. We also evaluated performance and precisions of our proposed incremental algorithm with real-world big networks with up to 410,236 vertices and 2,439,437 edges and computed in less than one second to detect communities in an incremental fashion - which achieves up to 107 times faster than the original algorithm without sacrificing accuracies. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.06251v1-abstract-full').style.display = 'none'; document.getElementById('1808.06251v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 August, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 7 figures and 3 pseudo codes, 2016 IEEE International Conference on Big Data (Big Data)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1804.07152">arXiv:1804.07152</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1804.07152">pdf</a>, <a href="https://arxiv.org/format/1804.07152">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Scalable attribute-aware network embedding with locality </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Weiyi Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Z">Zhining Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+G">Guangmin Hu</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1804.07152v2-abstract-short" style="display: inline;"> Adding attributes for nodes to network embedding helps to improve the ability of the learned joint representation to depict features from topology and attributes simultaneously. Recent research on the joint embedding has exhibited a promising performance on a variety of tasks by jointly embedding the two spaces. However, due to the indispensable requirement of globality based information, present&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.07152v2-abstract-full').style.display = 'inline'; document.getElementById('1804.07152v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1804.07152v2-abstract-full" style="display: none;"> Adding attributes for nodes to network embedding helps to improve the ability of the learned joint representation to depict features from topology and attributes simultaneously. Recent research on the joint embedding has exhibited a promising performance on a variety of tasks by jointly embedding the two spaces. However, due to the indispensable requirement of globality based information, present approaches contain a flaw of in-scalability. Here we propose \emph{SANE}, a scalable attribute-aware network embedding algorithm with locality, to learn the joint representation from topology and attributes. By enforcing the alignment of a local linear relationship between each node and its K-nearest neighbors in topology and attribute space, the joint embedding representations are more informative comparing with a single representation from topology or attributes alone. And we argue that the locality in \emph{SANE} is the key to learning the joint representation at scale. 
By using several real-world networks from diverse domains, we demonstrate the efficacy of \emph{SANE} in performance and scalability aspect. Overall, for performance on label classification, SANE successfully reaches up to the highest F1-score on most datasets, and even closer to the baseline method that needs label information as extra inputs, compared with other state-of-the-art joint representation algorithms. What&#39;s more, \emph{SANE} has an up to 71.4\% performance gain compared with the single topology-based algorithm. For scalability, we have demonstrated the linear time complexity of \emph{SANE}. In addition, we intuitively observe that when the network size scales to 100,000 nodes, the &#34;learning joint embedding&#34; step of \emph{SANE} only takes $\approx10$ seconds. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1804.07152v2-abstract-full').style.display = 'none'; document.getElementById('1804.07152v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2018. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1802.03057">arXiv:1802.03057</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1802.03057">pdf</a>, <a href="https://arxiv.org/ps/1802.03057">ps</a>, <a href="https://arxiv.org/format/1802.03057">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> </div> <p class="title is-5 mathjax"> System G Distributed Graph Database </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tanase%2C+G">Gabriel Tanase</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+J">Jinho Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+C">Chun-Fu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Crawford%2C+J">Jason Crawford</a>, <a href="/search/cs?searchtype=author&amp;query=Kanezashi%2C+H">Hiroki Kanezashi</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+S">Song Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Vijitbenjaronk%2C+W+D">Warut D. Vijitbenjaronk</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1802.03057v1-abstract-short" style="display: inline;"> Motivated by the need to extract knowledge and value from interconnected data, graph analytics on big data is a very active area of research in both industry and academia. To support graph analytics efficiently a large number of in memory graph libraries, graph processing systems and graph databases have emerged. 
Projects in each of these categories focus on particular aspects such as static versu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.03057v1-abstract-full').style.display = 'inline'; document.getElementById('1802.03057v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1802.03057v1-abstract-full" style="display: none;"> Motivated by the need to extract knowledge and value from interconnected data, graph analytics on big data is a very active area of research in both industry and academia. To support graph analytics efficiently a large number of in memory graph libraries, graph processing systems and graph databases have emerged. Projects in each of these categories focus on particular aspects such as static versus dynamic graphs, off line versus on line processing, small versus large graphs, etc. While there has been much advance in graph processing in the past decades, there is still a need for a fast graph processing, using a cluster of machines with distributed storage. In this paper, we discuss a novel distributed graph database called System G designed for efficient graph data storage and processing on modern computing architectures. In particular we describe a single node graph database and a runtime and communication layer that allows us to compose a distributed graph database from multiple single node instances. From various industry requirements, we find that fast insertions and large volume concurrent queries are critical parts of the graph databases and we optimize our database for such features. We experimentally show the efficiency of System G for storing data and processing graph queries on state-of-the-art platforms. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.03057v1-abstract-full').style.display = 'none'; document.getElementById('1802.03057v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 February, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1709.03551">arXiv:1709.03551</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1709.03551">pdf</a>, <a href="https://arxiv.org/format/1709.03551">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> Principled Multilayer Network Embedding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Weiyi Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+P">Pin-Yu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Yeung%2C+S">Sailung Yeung</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Lingli Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1709.03551v3-abstract-short" style="display: inline;"> Multilayer network analysis has become a vital tool for understanding different relationships and their interactions in a complex system, where each layer in a multilayer network depicts the 
topological structure of a group of nodes corresponding to a particular relationship. The interactions among different layers imply how the interplay of different relations on the topology of each layer. For a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.03551v3-abstract-full').style.display = 'inline'; document.getElementById('1709.03551v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1709.03551v3-abstract-full" style="display: none;"> Multilayer network analysis has become a vital tool for understanding different relationships and their interactions in a complex system, where each layer in a multilayer network depicts the topological structure of a group of nodes corresponding to a particular relationship. The interactions among different layers imply how the interplay of different relations on the topology of each layer. For a single-layer network, network embedding methods have been proposed to project the nodes in a network into a continuous vector space with a relatively small number of dimensions, where the space embeds the social representations among nodes. These algorithms have been proved to have a better performance on a variety of regular graph analysis tasks, such as link prediction, or multi-label classification. In this paper, by extending a standard graph mining into multilayer network, we have proposed three methods (&#34;network aggregation,&#34; &#34;results aggregation&#34; and &#34;layer co-analysis&#34;) to project a multilayer network into a continuous vector space. From the evaluation, we have proved that comparing with regular link prediction methods, &#34;layer co-analysis&#34; achieved the best performance on most of the datasets, while &#34;network aggregation&#34; and &#34;results aggregation&#34; also have better performance than regular link prediction methods. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.03551v3-abstract-full').style.display = 'none'; document.getElementById('1709.03551v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1709.03545">arXiv:1709.03545</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1709.03545">pdf</a>, <a href="https://arxiv.org/format/1709.03545">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Learning Graph Topological Features via GAN </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Weiyi Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Cooper%2C+H">Hal Cooper</a>, <a href="/search/cs?searchtype=author&amp;query=Oh%2C+M+H">Min Hwan Oh</a>, <a href="/search/cs?searchtype=author&amp;query=Yeung%2C+S">Sailung Yeung</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+P">Pin-Yu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+L">Lingli Chen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="1709.03545v5-abstract-short" style="display: inline;"> Inspired by the generation power of generative adversarial networks (GANs) in image domains, we introduce a novel hierarchical architecture for learning characteristic topological features from a single arbitrary input graph via GANs. The hierarchical architecture consisting of multiple GANs preserves both local and global topological features and automatically partitions the input graph into repr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.03545v5-abstract-full').style.display = 'inline'; document.getElementById('1709.03545v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1709.03545v5-abstract-full" style="display: none;"> Inspired by the generation power of generative adversarial networks (GANs) in image domains, we introduce a novel hierarchical architecture for learning characteristic topological features from a single arbitrary input graph via GANs. The hierarchical architecture consisting of multiple GANs preserves both local and global topological features and automatically partitions the input graph into representative stages for feature learning. The stages facilitate reconstruction and can be used as indicators of the importance of the associated topological structures. Experiments show that our method produces subgraphs retaining a wide range of topological features, even in early reconstruction stages (unlike a single GAN, which cannot easily identify such features, let alone reconstruct the original graph). This paper is firstline research on combining the use of GANs and graph topological analysis. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1709.03545v5-abstract-full').style.display = 'none'; document.getElementById('1709.03545v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.09872">arXiv:1707.09872</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1707.09872">pdf</a>, <a href="https://arxiv.org/format/1707.09872">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Full-Network Embedding in a Multimodal Embedding Pipeline </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Vilalta%2C+A">Armand Vilalta</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Par%C3%A9s%2C+F">Ferran Parés</a>, <a href="/search/cs?searchtype=author&amp;query=Ayguad%C3%A9%2C+E">Eduard Ayguadé</a>, <a href="/search/cs?searchtype=author&amp;query=Labarta%2C+J">Jesus Labarta</a>, <a href="/search/cs?searchtype=author&amp;query=Cort%C3%A9s%2C+U">Ulises Cortés</a>, <a 
href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1707.09872v2-abstract-short" style="display: inline;"> The current state-of-the-art for image annotation and image retrieval tasks is obtained through deep neural networks, which combine an image representation and a text representation into a shared embedding space. In this paper we evaluate the impact of using the Full-Network embedding in this setting, replacing the original image representation in a competitive multimodal embedding generation sche&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.09872v2-abstract-full').style.display = 'inline'; document.getElementById('1707.09872v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1707.09872v2-abstract-full" style="display: none;"> The current state-of-the-art for image annotation and image retrieval tasks is obtained through deep neural networks, which combine an image representation and a text representation into a shared embedding space. In this paper we evaluate the impact of using the Full-Network embedding in this setting, replacing the original image representation in a competitive multimodal embedding generation scheme. Unlike the one-layer image embeddings typically used by most approaches, the Full-Network embedding provides a multi-scale representation of images, which results in richer characterizations. To measure the influence of the Full-Network embedding, we evaluate its performance on three different datasets, and compare the results with the original multimodal embedding generation scheme when using a one-layer image embedding, and with the rest of the state-of-the-art. 
Results for image annotation and image retrieval tasks indicate that the Full-Network embedding is consistently superior to the one-layer embedding. These results motivate the integration of the Full-Network embedding on any multimodal embedding generation scheme, something feasible thanks to the flexibility of the approach. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.09872v2-abstract-full').style.display = 'none'; document.getElementById('1707.09872v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 August, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">In 2nd Workshop on Semantic Deep Learning (SemDeep-2) at the 12th International Conference on Computational Semantics (IWCS) 2017</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.07465">arXiv:1707.07465</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1707.07465">pdf</a>, <a href="https://arxiv.org/format/1707.07465">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> Building Graph Representations of Deep Vector Embeddings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Vilalta%2C+A">Armand Vilalta</a>, <a 
href="/search/cs?searchtype=author&amp;query=Par%C3%A9s%2C+F">Ferran Parés</a>, <a href="/search/cs?searchtype=author&amp;query=Moreno%2C+J">Jonatan Moreno</a>, <a href="/search/cs?searchtype=author&amp;query=Ayguad%C3%A9%2C+E">Eduard Ayguadé</a>, <a href="/search/cs?searchtype=author&amp;query=Labarta%2C+J">Jesus Labarta</a>, <a href="/search/cs?searchtype=author&amp;query=Cort%C3%A9s%2C+U">Ulises Cortés</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1707.07465v2-abstract-short" style="display: inline;"> Patterns stored within pre-trained deep neural networks compose large and powerful descriptive languages that can be used for many different purposes. Typically, deep network representations are implemented within vector embedding spaces, which enables the use of traditional machine learning algorithms on top of them. In this short paper we propose the construction of a graph embedding space inste&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.07465v2-abstract-full').style.display = 'inline'; document.getElementById('1707.07465v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1707.07465v2-abstract-full" style="display: none;"> Patterns stored within pre-trained deep neural networks compose large and powerful descriptive languages that can be used for many different purposes. Typically, deep network representations are implemented within vector embedding spaces, which enables the use of traditional machine learning algorithms on top of them. 
In this short paper we propose the construction of a graph embedding space instead, introducing a methodology to transform the knowledge coded within a deep convolutional network into a topological space (i.e. a network). We outline how such graph can hold data instances, data features, relations between instances and features, and relations among features. Finally, we introduce some preliminary experiments to illustrate how the resultant graph embedding space can be exploited through graph analytics algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.07465v2-abstract-full').style.display = 'none'; document.getElementById('1707.07465v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 August, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the 2nd Workshop on Semantic Deep Learning (SemDeep-2)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.06197">arXiv:1707.06197</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1707.06197">pdf</a>, <a href="https://arxiv.org/format/1707.06197">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Can GAN Learn Topological Features of a Graph? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+W">Weiyi Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+P">Pin-Yu Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Cooper%2C+H">Hal Cooper</a>, <a href="/search/cs?searchtype=author&amp;query=Oh%2C+M+H">Min Hwan Oh</a>, <a href="/search/cs?searchtype=author&amp;query=Yeung%2C+S">Sailung Yeung</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1707.06197v1-abstract-short" style="display: inline;"> This paper is first-line research expanding GANs into graph topology analysis. By leveraging the hierarchical connectivity structure of a graph, we have demonstrated that generative adversarial networks (GANs) can successfully capture topological features of any arbitrary graph, and rank edge sets by different stages according to their contribution to topology reconstruction. Moreover, in addition&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.06197v1-abstract-full').style.display = 'inline'; document.getElementById('1707.06197v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1707.06197v1-abstract-full" style="display: none;"> This paper is first-line research expanding GANs into graph topology analysis. By leveraging the hierarchical connectivity structure of a graph, we have demonstrated that generative adversarial networks (GANs) can successfully capture topological features of any arbitrary graph, and rank edge sets by different stages according to their contribution to topology reconstruction. 
Moreover, in addition to acting as an indicator of graph reconstruction, we find that these stages can also preserve important topological features in a graph. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.06197v1-abstract-full').style.display = 'none'; document.getElementById('1707.06197v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1705.07706">arXiv:1705.07706</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1705.07706">pdf</a>, <a href="https://arxiv.org/format/1705.07706">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> An Out-of-the-box Full-network Embedding for Convolutional Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Vilalta%2C+A">Armand Vilalta</a>, <a href="/search/cs?searchtype=author&amp;query=Par%C3%A9s%2C+F">Ferran Parés</a>, <a href="/search/cs?searchtype=author&amp;query=Moreno%2C+J">Jonatan Moreno</a>, <a href="/search/cs?searchtype=author&amp;query=Ayguad%C3%A9%2C+E">Eduard Ayguadé</a>, <a href="/search/cs?searchtype=author&amp;query=Labarta%2C+J">Jesus Labarta</a>, <a href="/search/cs?searchtype=author&amp;query=Cort%C3%A9s%2C+U">Ulises Cortés</a>, <a 
href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1705.07706v1-abstract-short" style="display: inline;"> Transfer learning for feature extraction can be used to exploit deep representations in contexts where there is very few training data, where there are limited computational resources, or when tuning the hyper-parameters needed for training is not an option. While previous contributions to feature extraction propose embeddings based on a single layer of the network, in this paper we propose a full&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1705.07706v1-abstract-full').style.display = 'inline'; document.getElementById('1705.07706v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1705.07706v1-abstract-full" style="display: none;"> Transfer learning for feature extraction can be used to exploit deep representations in contexts where there is very few training data, where there are limited computational resources, or when tuning the hyper-parameters needed for training is not an option. While previous contributions to feature extraction propose embeddings based on a single layer of the network, in this paper we propose a full-network embedding which successfully integrates convolutional and fully connected features, coming from all layers of a deep convolutional neural network. To do so, the embedding normalizes features in the context of the problem, and discretizes their values to reduce noise and regularize the embedding space. Significantly, this also reduces the computational cost of processing the resultant representations. 
The proposed method is shown to outperform single layer embeddings on several image classification tasks, while also being more robust to the choice of the pre-trained model used for obtaining the initial features. The performance gap in classification accuracy between thoroughly tuned solutions and the full-network embedding is also reduced, which makes of the proposed approach a competitive solution for a large set of applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1705.07706v1-abstract-full').style.display = 'none'; document.getElementById('1705.07706v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1704.06841">arXiv:1704.06841</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1704.06841">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Medical Text Classification using Convolutional Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hughes%2C+M">Mark Hughes</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+I">Irene Li</a>, <a href="/search/cs?searchtype=author&amp;query=Kotoulas%2C+S">Spyros Kotoulas</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1704.06841v1-abstract-short" style="display: 
inline;"> We present an approach to automatically classify clinical text at a sentence level. We are using deep convolutional neural networks to represent complex features. We train the network on a dataset providing a broad categorization of health information. Through a detailed evaluation, we demonstrate that our method outperforms several approaches widely used in natural language processing tasks by ab&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1704.06841v1-abstract-full').style.display = 'inline'; document.getElementById('1704.06841v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1704.06841v1-abstract-full" style="display: none;"> We present an approach to automatically classify clinical text at a sentence level. We are using deep convolutional neural networks to represent complex features. We train the network on a dataset providing a broad categorization of health information. Through a detailed evaluation, we demonstrate that our method outperforms several approaches widely used in natural language processing tasks by about 15%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1704.06841v1-abstract-full').style.display = 'none'; document.getElementById('1704.06841v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 April, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2017. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1703.09307">arXiv:1703.09307</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1703.09307">pdf</a>, <a href="https://arxiv.org/format/1703.09307">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> Fluid Communities: A Competitive, Scalable and Diverse Community Detection Algorithm </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Par%C3%A9s%2C+F">Ferran Parés</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Vilalta%2C+A">Armand Vilalta</a>, <a href="/search/cs?searchtype=author&amp;query=Moreno%2C+J">Jonatan Moreno</a>, <a href="/search/cs?searchtype=author&amp;query=Ayguad%C3%A9%2C+E">Eduard Ayguadé</a>, <a href="/search/cs?searchtype=author&amp;query=Labarta%2C+J">Jesús Labarta</a>, <a href="/search/cs?searchtype=author&amp;query=Cort%C3%A9s%2C+U">Ulises Cortés</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1703.09307v3-abstract-short" style="display: inline;"> We introduce a community detection algorithm (Fluid Communities) based on the idea of fluids interacting in an environment, expanding and contracting as a result of that interaction. 
Fluid Communities is based on the propagation methodology, which represents the state-of-the-art in terms of computational cost and scalability. While being highly efficient, Fluid Communities is able to find communit&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.09307v3-abstract-full').style.display = 'inline'; document.getElementById('1703.09307v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1703.09307v3-abstract-full" style="display: none;"> We introduce a community detection algorithm (Fluid Communities) based on the idea of fluids interacting in an environment, expanding and contracting as a result of that interaction. Fluid Communities is based on the propagation methodology, which represents the state-of-the-art in terms of computational cost and scalability. While being highly efficient, Fluid Communities is able to find communities in synthetic graphs with an accuracy close to the current best alternatives. Additionally, Fluid Communities is the first propagation-based algorithm capable of identifying a variable number of communities in network. To illustrate the relevance of the algorithm, we evaluate the diversity of the communities found by Fluid Communities, and find them to be significantly different from the ones found by alternative methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.09307v3-abstract-full').style.display = 'none'; document.getElementById('1703.09307v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 March, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2017. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at the 6th International Conference on Complex Networks and Their Applications</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1703.01127">arXiv:1703.01127</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1703.01127">pdf</a>, <a href="https://arxiv.org/format/1703.01127">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> On the Behavior of Convolutional Nets for Feature Extraction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Garcia-Gasulla%2C+D">Dario Garcia-Gasulla</a>, <a href="/search/cs?searchtype=author&amp;query=Par%C3%A9s%2C+F">Ferran Parés</a>, <a href="/search/cs?searchtype=author&amp;query=Vilalta%2C+A">Armand Vilalta</a>, <a href="/search/cs?searchtype=author&amp;query=Moreno%2C+J">Jonatan Moreno</a>, <a href="/search/cs?searchtype=author&amp;query=Ayguad%C3%A9%2C+E">Eduard Ayguadé</a>, <a href="/search/cs?searchtype=author&amp;query=Labarta%2C+J">Jesús Labarta</a>, <a href="/search/cs?searchtype=author&amp;query=Cort%C3%A9s%2C+U">Ulises Cortés</a>, <a href="/search/cs?searchtype=author&amp;query=Suzumura%2C+T">Toyotaro Suzumura</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span 
class="abstract-short has-text-grey-dark mathjax" id="1703.01127v4-abstract-short" style="display: inline;"> Deep neural networks are representation learning techniques. During training, a deep net is capable of generating a descriptive language of unprecedented size and detail in machine learning. Extracting the descriptive language coded within a trained CNN model (in the case of image data), and reusing it for other purposes is a field of interest, as it provides access to the visual descriptors previ&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.01127v4-abstract-full').style.display = 'inline'; document.getElementById('1703.01127v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1703.01127v4-abstract-full" style="display: none;"> Deep neural networks are representation learning techniques. During training, a deep net is capable of generating a descriptive language of unprecedented size and detail in machine learning. Extracting the descriptive language coded within a trained CNN model (in the case of image data), and reusing it for other purposes is a field of interest, as it provides access to the visual descriptors previously learnt by the CNN after processing millions of images, without requiring an expensive training phase. Contributions to this field (commonly known as feature representation transfer or transfer learning) have been purely empirical so far, extracting all CNN features from a single layer close to the output and testing their performance by feeding them to a classifier. This approach has provided consistent results, although its relevance is limited to classification tasks. In a completely different approach, in this paper we statistically measure the discriminative power of every single feature found within a deep CNN, when used for characterizing every class of 11 datasets. 
We seek to provide new insights into the behavior of CNN features, particularly the ones from convolutional layers, as this can be relevant for their application to knowledge representation and reasoning. Our results confirm that low and middle level features may behave differently to high level features, but only under certain conditions. We find that all CNN features can be used for knowledge representation purposes both by their presence or by their absence, doubling the information a single CNN feature may provide. We also study how much noise these features may include, and propose a thresholding approach to discard most of it. All these insights have a direct application to the generation of CNN embedding spaces. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.01127v4-abstract-full').style.display = 'none'; document.getElementById('1703.01127v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 January, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 March, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2017. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in the Journal of Artificial Intelligence Research (JAIR), Special Track on Deep Learning, Knowledge Representation, and Reasoning</span> </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Suzumura%2C+T&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Suzumura%2C+T&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Suzumura%2C+T&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 
56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 
190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10