Search | arXiv e-print repository
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–36 of 36 results for author: <span class="mathjax">Bertozzi, A L</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Bertozzi%2C+A+L">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Bertozzi, A L"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Bertozzi%2C+A+L&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Bertozzi, A L"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03614">arXiv:2411.03614</a> <span> [<a href="https://arxiv.org/pdf/2411.03614">pdf</a>, <a href="https://arxiv.org/format/2411.03614">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ACC.2012.6315420">10.1109/ACC.2012.6315420 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Robot Swarming over the internet </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ferenc%2C+W">Will Ferenc</a>, <a href="/search/cs?searchtype=author&query=Kastein%2C+H">Hannah Kastein</a>, <a href="/search/cs?searchtype=author&query=Lieu%2C+L">Lauren Lieu</a>, <a href="/search/cs?searchtype=author&query=Wilson%2C+R">Ryan Wilson</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+Y+R">Yuan Rick Huang</a>, <a href="/search/cs?searchtype=author&query=Gilles%2C+J">Jerome Gilles</a>, <a href="/search/cs?searchtype=author&query=Bertozzi%2C+A+L">Andrea L. Bertozzi</a>, <a href="/search/cs?searchtype=author&query=Sharma%2C+B+R">Balaji R. Sharma</a>, <a href="/search/cs?searchtype=author&query=HomChaudhuri%2C+B">Baisravan HomChaudhuri</a>, <a href="/search/cs?searchtype=author&query=Ramakrishnan%2C+S">Subramanian Ramakrishnan</a>, <a href="/search/cs?searchtype=author&query=Kumar%2C+M">Manish Kumar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03614v1-abstract-short" style="display: inline;"> This paper considers cooperative control of robots involving two different testbed systems in remote locations with communication on the internet. This provides us the capability to exchange robots status like positions, velocities and directions needed for the swarming algorithm. The results show that all robots properly follow some leader defined one of the testbeds. Measurement of data exchange… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03614v1-abstract-full').style.display = 'inline'; document.getElementById('2411.03614v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03614v1-abstract-full" style="display: none;"> This paper considers cooperative control of robots involving two different testbed systems in remote locations with communication on the internet. 
This provides us the capability to exchange robots status like positions, velocities and directions needed for the swarming algorithm. The results show that all robots properly follow some leader defined one of the testbeds. Measurement of data exchange rates show no loss of packets, and average transfer delays stay within tolerance limits for practical applications. In our knowledge, the novelty of this paper concerns this kind of control over a large network like internet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03614v1-abstract-full').style.display = 'none'; document.getElementById('2411.03614v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 012 American Control Conference, Montreal, Canada, June 2012 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.02435">arXiv:2411.02435</a> <span> [<a href="https://arxiv.org/pdf/2411.02435">pdf</a>, <a href="https://arxiv.org/format/2411.02435">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Narrative Analysis of True Crime Podcasts With Knowledge Graph-Augmented Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Leng%2C+X">Xinyi Leng</a>, <a href="/search/cs?searchtype=author&query=Liang%2C+J">Jason Liang</a>, <a href="/search/cs?searchtype=author&query=Mauro%2C+J">Jack Mauro</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+X">Xu Wang</a>, <a href="/search/cs?searchtype=author&query=Bertozzi%2C+A+L">Andrea L. Bertozzi</a>, <a href="/search/cs?searchtype=author&query=Chapman%2C+J">James Chapman</a>, <a href="/search/cs?searchtype=author&query=Lin%2C+J">Junyuan Lin</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+B">Bohan Chen</a>, <a href="/search/cs?searchtype=author&query=Ye%2C+C">Chenchen Ye</a>, <a href="/search/cs?searchtype=author&query=Daniel%2C+T">Temple Daniel</a>, <a href="/search/cs?searchtype=author&query=Brantingham%2C+P+J">P. Jeffrey Brantingham</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.02435v1-abstract-short" style="display: inline;"> Narrative data spans all disciplines and provides a coherent model of the world to the reader or viewer. Recent advancement in machine learning and Large Language Models (LLMs) have enable great strides in analyzing natural language. However, Large language models (LLMs) still struggle with complex narrative arcs as well as narratives containing conflicting information. 
Recent work indicates LLMs… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02435v1-abstract-full').style.display = 'inline'; document.getElementById('2411.02435v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.02435v1-abstract-full" style="display: none;"> Narrative data spans all disciplines and provides a coherent model of the world to the reader or viewer. Recent advancement in machine learning and Large Language Models (LLMs) have enable great strides in analyzing natural language. However, Large language models (LLMs) still struggle with complex narrative arcs as well as narratives containing conflicting information. Recent work indicates LLMs augmented with external knowledge bases can improve the accuracy and interpretability of the resulting models. In this work, we analyze the effectiveness of applying knowledge graphs (KGs) in understanding true-crime podcast data from both classical Natural Language Processing (NLP) and LLM approaches. We directly compare KG-augmented LLMs (KGLLMs) with classical methods for KG construction, topic modeling, and sentiment analysis. Additionally, the KGLLM allows us to query the knowledge base in natural language and test its ability to factually answer questions. We examine the robustness of the model to adversarial prompting in order to test the model's ability to deal with conflicting information. Finally, we apply classical methods to understand more subtle aspects of the text such as the use of hearsay and sentiment in narrative construction and propose future directions. Our results indicate that KGLLMs outperform LLMs on a variety of metrics, are more robust to adversarial prompts, and are more capable of summarizing the text into topics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.02435v1-abstract-full').style.display = 'none'; document.getElementById('2411.02435v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 Pages, 3 Figures, GTA3 Workshop-2024, October 2024, 33rd International Conference on Information and Knowledge Management, Boise, Idaho, USA</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.00281">arXiv:2411.00281</a> <span> [<a href="https://arxiv.org/pdf/2411.00281">pdf</a>, <a href="https://arxiv.org/format/2411.00281">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1117/12.2015155">10.1117/12.2015155 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Detection and tracking of gas plumes in LWIR hyperspectral video sequence data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gerhart%2C+T">Torin Gerhart</a>, <a href="/search/cs?searchtype=author&query=Sunu%2C+J">Justin Sunu</a>, <a href="/search/cs?searchtype=author&query=Merkurjev%2C+E">Ekaterina Merkurjev</a>, <a href="/search/cs?searchtype=author&query=Chang%2C+J">Jen-Mei Chang</a>, <a href="/search/cs?searchtype=author&query=Gilles%2C+J">Jerome Gilles</a>, <a href="/search/cs?searchtype=author&query=Bertozzi%2C+A+L">Andrea L. Bertozzi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.00281v1-abstract-short" style="display: inline;"> Automated detection of chemical plumes presents a segmentation challenge. The segmentation problem for gas plumes is difficult due to the diffusive nature of the cloud. The advantage of considering hyperspectral images in the gas plume detection problem over the conventional RGB imagery is the presence of non-visual data, allowing for a richer representation of information. In this paper we presen… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00281v1-abstract-full').style.display = 'inline'; document.getElementById('2411.00281v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.00281v1-abstract-full" style="display: none;"> Automated detection of chemical plumes presents a segmentation challenge. The segmentation problem for gas plumes is difficult due to the diffusive nature of the cloud. The advantage of considering hyperspectral images in the gas plume detection problem over the conventional RGB imagery is the presence of non-visual data, allowing for a richer representation of information. In this paper we present an effective method of visualizing hyperspectral video sequences containing chemical plumes and investigate the effectiveness of segmentation techniques on these post-processed videos. 
Our approach uses a combination of dimension reduction and histogram equalization to prepare the hyperspectral videos for segmentation. First, Principal Components Analysis (PCA) is used to reduce the dimension of the entire video sequence. This is done by projecting each pixel onto the first few Principal Components resulting in a type of spectral filter. Next, a Midway method for histogram equalization is used. These methods redistribute the intensity values in order to reduce flicker between frames. This properly prepares these high-dimensional video sequences for more traditional segmentation techniques. We compare the ability of various clustering techniques to properly segment the chemical plume. These include K-means, spectral clustering, and the Ginzburg-Landau functional. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.00281v1-abstract-full').style.display = 'none'; document.getElementById('2411.00281v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> SPIE Defense, Security, and Sensing, 2013, Baltimore, Proceedings Volume 8743, Algorithms and Technologies for Multispectral, Hyperspectral, and Ultraspectral Imagery XIX; 87430J (2013) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.13781">arXiv:2406.13781</a> <span> [<a href="https://arxiv.org/pdf/2406.13781">pdf</a>, <a href="https://arxiv.org/format/2406.13781">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> A Primal-Dual Framework for Transformers and Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Nguyen%2C+T+M">Tan M. Nguyen</a>, <a href="/search/cs?searchtype=author&query=Nguyen%2C+T">Tam Nguyen</a>, <a href="/search/cs?searchtype=author&query=Ho%2C+N">Nhat Ho</a>, <a href="/search/cs?searchtype=author&query=Bertozzi%2C+A+L">Andrea L. Bertozzi</a>, <a href="/search/cs?searchtype=author&query=Baraniuk%2C+R+G">Richard G. Baraniuk</a>, <a href="/search/cs?searchtype=author&query=Osher%2C+S+J">Stanley J. Osher</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.13781v1-abstract-short" style="display: inline;"> Self-attention is key to the remarkable success of transformers in sequence modeling tasks including many applications in natural language processing and computer vision. 
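   The PCA-plus-clustering preprocessing described in this abstract can be sketched in a few lines; the snippet below is an illustrative reconstruction in which random data stands in for an LWIR frame, and the Midway equalization and Ginzburg-Landau steps are omitted. It is not the authors' code.

# Illustrative reconstruction of the PCA + K-means steps described above.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

def segment_hyperspectral_frame(cube, n_components=3, n_clusters=4):
    """cube: (height, width, bands) array, one spectrum per pixel."""
    h, w, bands = cube.shape
    pixels = cube.reshape(-1, bands)
    scores = PCA(n_components=n_components).fit_transform(pixels)  # spectral filter
    labels = KMeans(n_clusters=n_clusters, n_init=10).fit_predict(scores)
    return labels.reshape(h, w)  # cluster map; ideally one cluster tracks the plume

frame = np.random.rand(64, 64, 128)  # synthetic stand-in for real data
print(segment_hyperspectral_frame(frame).shape)  # (64, 64)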
4. arXiv:2406.13781 [pdf, other]; cs.LG, cs.AI, cs.CL, cs.CV, stat.ML
   A Primal-Dual Framework for Transformers and Neural Networks
   Authors: Tan M. Nguyen, Tam Nguyen, Nhat Ho, Andrea L. Bertozzi, Richard G. Baraniuk, Stanley J. Osher
   Abstract: Self-attention is key to the remarkable success of transformers in sequence modeling tasks, including many applications in natural language processing and computer vision. Like neural network layers, these attention mechanisms are often developed by heuristics and experience. To provide a principled framework for constructing attention layers in transformers, we show that self-attention corresponds to the support vector expansion derived from a support vector regression problem whose primal formulation has the form of a neural network layer. Using our framework, we derive popular attention layers used in practice and propose two new attentions: 1) Batch Normalized Attention (Attention-BN), derived from the batch normalization layer, and 2) Attention with Scaled Head (Attention-SH), derived from using less training data to fit the SVR model. We empirically demonstrate the advantages of Attention-BN and Attention-SH in reducing head redundancy, increasing the model's accuracy, and improving the model's efficiency in a variety of practical applications including image and time-series classification.
   Submitted 19 June, 2024; originally announced June 2024.
   Comments: Accepted to ICLR 2023, 26 pages, 4 figures, 14 tables
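   As context, the standard scaled dot-product self-attention that this primal-dual framework reinterprets can be written in a few lines of NumPy. This is only the familiar baseline, not the paper's Attention-BN or Attention-SH variants.

# Standard scaled dot-product self-attention (the baseline the abstract refers to).
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def self_attention(X, Wq, Wk, Wv):
    """X: (seq_len, d_model); Wq, Wk, Wv: (d_model, d_head)."""
    Q, K, V = X @ Wq, X @ Wk, X @ Wv
    scores = Q @ K.T / np.sqrt(K.shape[-1])  # pairwise similarity scores
    return softmax(scores, axis=-1) @ V      # each output is a convex combination of values

rng = np.random.default_rng(0)
X = rng.normal(size=(10, 16))
Wq, Wk, Wv = (rng.normal(size=(16, 8)) for _ in range(3))
print(self_attention(X, Wq, Wk, Wv).shape)  # (10, 8)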
5. arXiv:2311.14740 [pdf, other]; cs.CL
   AutoKG: Efficient Automated Knowledge Graph Generation for Language Models
   Authors: Bohan Chen, Andrea L. Bertozzi
   Abstract: Traditional methods of linking large language models (LLMs) to knowledge bases via semantic similarity search often fall short of capturing complex relational dynamics. To address these limitations, we introduce AutoKG, a lightweight and efficient approach for automated knowledge graph (KG) construction. For a given knowledge base consisting of text blocks, AutoKG first extracts keywords using an LLM and then evaluates the relationship weight between each pair of keywords using graph Laplace learning. We employ a hybrid search scheme combining vector similarity and graph-based associations to enrich LLM responses. Preliminary experiments demonstrate that AutoKG offers a more comprehensive and interconnected knowledge retrieval mechanism compared to semantic similarity search, thereby enhancing the capabilities of LLMs in generating more insightful and relevant outputs.
   Submitted 22 November, 2023; originally announced November 2023.
   Comments: 10 pages, accepted by IEEE BigData 2023 as a workshop paper in GTA3
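   The hybrid retrieval idea in this abstract, blending embedding similarity with keyword-graph associations, can be illustrated with a toy scorer. The edge weights below are hand-made placeholders; AutoKG derives them with graph Laplace learning, which is not reproduced in this sketch.

# Toy hybrid scorer mixing embedding similarity with keyword-graph association.
import numpy as np

def cosine(u, v):
    return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-12))

def hybrid_ranking(query_vec, block_vecs, query_keywords, block_keywords, kg_weights, alpha=0.5):
    """kg_weights maps frozenset({kw1, kw2}) to an edge weight in the keyword graph."""
    scores = []
    for vec, kws in zip(block_vecs, block_keywords):
        sim = cosine(query_vec, vec)
        assoc = sum(kg_weights.get(frozenset({q, k}), 0.0)
                    for q in query_keywords for k in kws)
        scores.append(alpha * sim + (1 - alpha) * assoc)
    return np.argsort(scores)[::-1]  # indices of the best-matching text blocks first

rng = np.random.default_rng(1)
print(hybrid_ranking(rng.normal(size=4), rng.normal(size=(3, 4)),
                     ["crime"], [["arrest"], ["podcast"], ["graph"]],
                     {frozenset({"crime", "arrest"}): 0.8}))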
6. arXiv:2311.11163 [pdf, other]; cs.SI, stat.AP, stat.CO
   Hate speech and hate crimes: a data-driven study of evolving discourse around marginalized groups
   Authors: Malvina Bozhidarova, Jonathn Chang, Aaishah Ale-rasool, Yuxiang Liu, Chongyao Ma, Andrea L. Bertozzi, P. Jeffrey Brantingham, Junyuan Lin, Sanjukta Krishnagopal
   Abstract: This study explores the dynamic relationship between online discourse, as observed in tweets, and physical hate crimes, focusing on marginalized groups. Leveraging natural language processing techniques, including keyword extraction and topic modeling, we analyze the evolution of online discourse after events affecting these groups. Examining sentiment and polarizing tweets, we establish correlations with hate crimes in Black and LGBTQ+ communities. Using a knowledge graph, we connect tweets, users, topics, and hate crimes, enabling network analyses. Our findings reveal divergent patterns in the evolution of user communities for Black and LGBTQ+ groups, with notable differences in sentiment among influential users. This analysis sheds light on distinctive online discourse patterns and emphasizes the need to monitor hate speech to prevent hate crimes, especially following significant events impacting marginalized communities.
   Submitted 18 November, 2023; originally announced November 2023.

7. arXiv:2307.10495 [pdf, other]; cs.LG, cs.CV, eess.SP; doi: 10.1117/12.2662393
   Novel Batch Active Learning Approach and Its Application to Synthetic Aperture Radar Datasets
   Authors: James Chapman, Bohan Chen, Zheng Tan, Jeff Calder, Kevin Miller, Andrea L. Bertozzi
   Abstract: Active learning improves the performance of machine learning methods by judiciously selecting a limited number of unlabeled data points to query for labels, with the aim of maximally improving the underlying classifier's performance. Recent gains have been made using sequential active learning for synthetic aperture radar (SAR) data (arXiv:2204.00005). In each iteration, sequential active learning selects a query set of size one, while batch active learning selects a query set of multiple data points. While batch active learning methods exhibit greater efficiency, the challenge lies in maintaining model accuracy relative to sequential active learning methods. We developed a novel, two-part approach for batch active learning: Dijkstra's Annulus Core-Set (DAC) for core-set generation and LocalMax for batch sampling. The batch active learning process that combines DAC and LocalMax achieves accuracy nearly identical to sequential active learning but is more efficient, proportional to the batch size. As an application, a pipeline is built on transfer-learning feature embedding, graph learning, DAC, and LocalMax to classify the FUSAR-Ship and OpenSARShip datasets. Our pipeline outperforms state-of-the-art CNN-based methods.
   Submitted 19 July, 2023; originally announced July 2023.
   Comments: 16 pages, 7 figures, Preprint
   ACM Class: I.2.6; I.2.10; I.4.0; I.4.9
   Journal ref: Proc. SPIE, Algorithms for Synthetic Aperture Radar Imagery XXX (Vol. 12520, pp. 96-111), 13 June 2023
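   The abstract names LocalMax but does not spell it out. One plausible reading, sketched below purely for illustration, is to take as the batch the unlabeled points whose acquisition values are local maxima over a k-nearest-neighbor graph; this is an assumption about the method, not the authors' implementation, and the DAC core-set step is omitted.

# Assumed "local maximum" batch rule over a k-NN graph (illustration only).
import numpy as np
from sklearn.neighbors import NearestNeighbors

def local_max_batch(features, acquisition, k=8, batch_size=5):
    """features: (n, d) embeddings; acquisition: (n,) scores, higher = more informative."""
    _, idx = NearestNeighbors(n_neighbors=k + 1).fit(features).kneighbors(features)
    is_local_max = np.array([acquisition[i] >= acquisition[idx[i, 1:]].max()
                             for i in range(len(features))])  # idx[:, 0] is the point itself
    candidates = np.where(is_local_max)[0]
    order = candidates[np.argsort(acquisition[candidates])[::-1]]
    return order[:batch_size]  # indices to send to the labeling oracle

rng = np.random.default_rng(0)
print(local_max_batch(rng.normal(size=(200, 16)), rng.random(200)))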
8. arXiv:2211.00119 [pdf, other]; cs.SD, eess.AS; doi: 10.1109/ICASSP49357.2023.10096465
   Active Learning of Non-semantic Speech Tasks with Pretrained Models
   Authors: Harlin Lee, Aaqib Saeed, Andrea L. Bertozzi
   Abstract: Pretraining neural networks with massive unlabeled datasets has become popular as it equips the deep models with a better prior to solve downstream tasks. However, this approach generally assumes that the downstream tasks have access to annotated data of sufficient size. In this work, we propose ALOE, a novel system for improving the data- and label-efficiency of non-semantic speech tasks with active learning. ALOE uses pretrained models in conjunction with active learning to label data incrementally and learn classifiers for downstream tasks, thereby mitigating the need to acquire labeled data beforehand. We demonstrate the effectiveness of ALOE on a wide range of tasks, uncertainty-based acquisition functions, and model architectures. Training a linear classifier on top of a frozen encoder with ALOE is shown to achieve performance similar to several baselines that utilize the entire labeled data.
   Submitted 25 February, 2023; v1 submitted 31 October, 2022; originally announced November 2022.
   Comments: Accepted at ICASSP'23. Code: https://github.com/HarlinLee/ALOE
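   The workflow this abstract describes, uncertainty-based querying of a linear classifier on top of a frozen encoder, looks roughly like the loop below. The embeddings and labels are synthetic stand-ins; this is not the authors' ALOE code, which lives in the linked repository.

# Generic uncertainty-sampling loop on frozen features (illustrative only).
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
emb = rng.normal(size=(500, 32))                     # stand-in for frozen-encoder features
labels = (emb[:, 0] + 0.3 * rng.normal(size=500) > 0).astype(int)

# Seed with a few labels from each class, then repeatedly query the least-confident points.
labeled = list(np.where(labels == 0)[0][:5]) + list(np.where(labels == 1)[0][:5])
for _ in range(5):
    clf = LogisticRegression(max_iter=1000).fit(emb[labeled], labels[labeled])
    uncertainty = 1.0 - clf.predict_proba(emb).max(axis=1)
    uncertainty[labeled] = -np.inf                   # never re-query known points
    labeled.extend(np.argsort(uncertainty)[-10:].tolist())  # "oracle" provides these labels
print("accuracy on all points:", clf.score(emb, labels))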
9. arXiv:2204.08621 [pdf, other]; math.NA, cs.LG
   Proximal Implicit ODE Solvers for Accelerating Learning Neural ODEs
   Authors: Justin Baker, Hedi Xia, Yiwei Wang, Elena Cherkaev, Akil Narayan, Long Chen, Jack Xin, Andrea L. Bertozzi, Stanley J. Osher, Bao Wang
   Abstract: Learning neural ODEs often requires solving very stiff ODE systems, primarily using explicit adaptive step size ODE solvers. These solvers are computationally expensive, requiring the use of tiny step sizes for numerical stability and accuracy guarantees. This paper considers learning neural ODEs using implicit ODE solvers of different orders leveraging proximal operators. The proximal implicit solver consists of inner-outer iterations: the inner iterations approximate each implicit update step using a fast optimization algorithm, and the outer iterations solve the ODE system over time. The proximal implicit ODE solver guarantees superiority over explicit solvers in numerical stability and computational efficiency. We validate the advantages of proximal implicit solvers over existing popular neural ODE solvers on various challenging benchmark tasks, including learning continuous-depth graph neural networks and continuous normalizing flows.
   Submitted 18 April, 2022; originally announced April 2022.
   Comments: 20 pages, 7 figures
   MSC Class: 68T07; 65L04
   ACM Class: I.2
10. arXiv:2204.00005 [pdf, other]; cs.LG, cs.AI, cs.CV, eess.IV, math.NA
   Graph-based Active Learning for Semi-supervised Classification of SAR Data
   Authors: Kevin Miller, John Mauro, Jason Setiadi, Xoaquin Baca, Zhan Shi, Jeff Calder, Andrea L. Bertozzi
   Abstract: We present a novel method for classification of synthetic aperture radar (SAR) data by combining ideas from graph-based learning and neural network methods within an active learning framework. Graph-based methods in machine learning are based on a similarity graph constructed from the data. When the data consist of raw images composed of scenes, extraneous information can make the classification task more difficult. In recent years, neural network methods have been shown to provide a promising framework for extracting patterns from SAR images. These methods, however, require ample training data to avoid overfitting. At the same time, such training data are often unavailable for applications of interest, such as automatic target recognition (ATR) in SAR data. We use a Convolutional Neural Network Variational Autoencoder (CNNVAE) to embed SAR data into a feature space, then construct a similarity graph from the embedded data and apply graph-based semi-supervised learning techniques. The CNNVAE feature embedding and graph construction require no labeled data, which reduces overfitting and improves the generalization performance of graph learning at low label rates. Furthermore, the method easily incorporates a human-in-the-loop for active learning in the data-labeling process. We present promising results and compare them to other standard machine learning methods on the Moving and Stationary Target Acquisition and Recognition (MSTAR) dataset for ATR with small amounts of labeled data.
   Submitted 30 March, 2022; originally announced April 2022.
   MSC Class: 68R10; 68T07; 68T05
   ACM Class: I.2.6; I.2.10; I.4.0; I.4.9
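   The graph half of this pipeline can be illustrated with a small Laplace-learning example: build a k-NN graph over feature embeddings and propagate a handful of labels by solving the harmonic system on the graph Laplacian. Random Gaussian vectors stand in for the CNNVAE features, and the active-learning loop is omitted; this is a sketch, not the authors' code.

# Small Laplace-learning example on a k-NN graph (illustration only).
import numpy as np
from sklearn.neighbors import kneighbors_graph

def laplace_learning(features, labeled_idx, labels, k=10):
    n = len(features)
    W = kneighbors_graph(features, k, mode="connectivity", include_self=False)
    W = 0.5 * (W + W.T).toarray()                       # symmetrized adjacency
    L = np.diag(W.sum(axis=1)) - W + 1e-8 * np.eye(n)   # graph Laplacian (tiny shift for safety)
    unlabeled = np.setdiff1d(np.arange(n), labeled_idx)
    classes = np.unique(labels)
    Y = (labels[:, None] == classes[None, :]).astype(float)     # one-hot labeled targets
    U = np.linalg.solve(L[np.ix_(unlabeled, unlabeled)],        # harmonic solution of
                        -L[np.ix_(unlabeled, labeled_idx)] @ Y)  # L_uu U = -L_ul Y_l
    pred = np.empty(n, dtype=classes.dtype)
    pred[labeled_idx] = labels
    pred[unlabeled] = classes[U.argmax(axis=1)]
    return pred

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 8)), rng.normal(4, 1, (50, 8))])
y = np.array([0] * 50 + [1] * 50)
labeled = np.array([0, 1, 50, 51])
print("accuracy:", (laplace_learning(X, labeled, y[labeled]) == y).mean())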
11. arXiv:2112.15486 [pdf, other]; cs.NI, cs.DC, cs.LG, math.NA
   Efficient and Reliable Overlay Networks for Decentralized Federated Learning
   Authors: Yifan Hua, Kevin Miller, Andrea L. Bertozzi, Chen Qian, Bao Wang
   Abstract: We propose near-optimal overlay networks based on $d$-regular expander graphs to accelerate decentralized federated learning (DFL) and improve its generalization. In DFL a massive number of clients are connected by an overlay network, and they solve machine learning problems collaboratively without sharing raw data. Our overlay network design integrates spectral graph theory and the theoretical convergence and generalization bounds for DFL. As such, our proposed overlay networks accelerate convergence, improve generalization, and enhance robustness to client failures in DFL with theoretical guarantees. We also present an efficient algorithm to convert a given graph into a practical overlay network and to maintain the network topology after potential client failures. We numerically verify the advantages of DFL with our proposed networks on various benchmark tasks, ranging from image classification to language modeling with hundreds of clients.
   Submitted 12 December, 2021; originally announced December 2021.
   Comments: 25 pages, 8 figures
   MSC Class: 65B99; 68T01; 68T09; 68W15
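   The reason expanders are attractive here is their large spectral gap, which controls how quickly decentralized averaging mixes. The short check below samples a random $d$-regular graph and reports that gap; the graph size and degree are arbitrary illustrative choices, and the paper's overlay-construction and failure-recovery algorithms are not reproduced.

# Quick spectral-gap check for a random d-regular gossip topology.
import numpy as np
import networkx as nx

n, d = 100, 6                                   # number of clients, per-client degree
G = nx.random_regular_graph(d, n, seed=0)
P = nx.to_numpy_array(G) / d                    # doubly stochastic averaging matrix
eigvals = np.sort(np.abs(np.linalg.eigvalsh(P)))[::-1]
print(f"second-largest |eigenvalue| = {eigvals[1]:.3f}, spectral gap = {1 - eigvals[1]:.3f}")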
arXiv:2110.07739 [pdf] (https://arxiv.org/abs/2110.07739)
Subjects: stat.ML; cs.LG
DOI: 10.1007/s42967-023-00328-z
Model-Change Active Learning in Graph-Based Semi-Supervised Learning
Authors: Kevin Miller, Andrea L. Bertozzi
Abstract: Active learning in semi-supervised classification involves introducing additional labels for unlabelled data to improve the accuracy of the underlying classifier. A challenge is to identify which points to label to best improve performance while limiting the number of new labels. "Model Change" active learning quantifies the resulting change incurred in the classifier by introducing the additional label(s). We pair this idea with graph-based semi-supervised learning methods that use the spectrum of the graph Laplacian matrix, which can be truncated to avoid prohibitively large computational and storage costs. We consider a family of convex loss functions for which the acquisition function can be efficiently approximated using the Laplace approximation of the posterior distribution. We show a variety of multiclass examples that illustrate improved performance over the prior state of the art.
Submitted 14 November, 2024; v1 submitted 14 October, 2021; originally announced October 2021.
Journal ref: Commun. Appl. Math. Comput. 6, 1270-1298 (2024)

arXiv:2110.04932 [pdf, other] (https://arxiv.org/abs/2110.04932)
Subjects: cs.SI; cs.CL
An Analysis of COVID-19 Knowledge Graph Construction and Applications
Authors: Dominic Flocco, Bryce Palmer-Toy, Ruixiao Wang, Hongyu Zhu, Rishi Sonthalia, Junyuan Lin, Andrea L. Bertozzi, P. Jeffrey Brantingham
Abstract: The construction and application of knowledge graphs have seen a rapid increase across many disciplines in recent years. Additionally, the problem of uncovering relationships between developments in the COVID-19 pandemic and social media behavior is of great interest to researchers hoping to curb the spread of the disease. In this paper we present a knowledge graph constructed from COVID-19-related tweets in the Los Angeles area, supplemented with federal and state policy announcements and disease-spread statistics. By incorporating dates, topics, and events as entities, we construct a knowledge graph that describes the connections among these pieces of information. We use natural language processing and change point analysis to extract tweet-topic, tweet-date, and event-date relations. Further analysis of the constructed knowledge graph provides insight into how tweets reflect public sentiment towards COVID-19-related topics and how changes in these sentiments correlate with real-world events.
Submitted 10 October, 2021; originally announced October 2021.
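A much-simplified, Gaussian-regression version of the ingredients named above (truncated Laplacian spectrum, look-ahead posterior, a model-change score) is sketched below. The data, the surrogate label, and the hyperparameters m, tau, and gamma are illustrative assumptions, not the paper's formulation.

```python
# Sketch of a model-change-style acquisition score using a truncated graph-Laplacian
# spectrum. Simplified Gaussian-regression variant for illustration only.
import numpy as np
from sklearn.neighbors import kneighbors_graph

rng = np.random.default_rng(1)
X = rng.normal(size=(400, 8))
W = kneighbors_graph(X, n_neighbors=8, mode="connectivity").toarray()
W = np.maximum(W, W.T)
L = np.diag(W.sum(1)) - W

m, tau, gamma = 30, 0.1, 0.5                 # spectrum truncation, prior shift, noise level
lam, V = np.linalg.eigh(L)
lam, V = lam[:m], V[:, :m]                   # keep the m smallest eigenpairs (cost/storage control)

y = (X[:, 0] > 0).astype(float)              # stand-in labels
labeled = list(rng.choice(len(X), 10, replace=False))

def posterior(labeled):
    # Gaussian posterior in the truncated spectral coordinates:
    # prior precision diag(lam + tau), likelihood precision 1/gamma^2 on labeled rows.
    P = V[labeled]
    A = np.diag(lam + tau) + P.T @ P / gamma**2
    alpha = np.linalg.solve(A, P.T @ y[labeled] / gamma**2)
    C = V @ np.linalg.solve(A, V.T)          # posterior covariance (n x n)
    return V @ alpha, C

f, C = posterior(labeled)
y_hat = (f > 0.5).astype(float)              # surrogate for the unknown label of each candidate
# Model-change score: norm of the MAP update if candidate k received its surrogate label.
scores = np.linalg.norm(C, axis=0) * np.abs(y_hat - f) / (gamma**2 + np.diag(C))
scores[labeled] = -np.inf
print("query next:", int(np.argmax(scores)))
```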
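For readers unfamiliar with the format, the typed relations the abstract describes (tweet-topic, tweet-date, event-date) can be stored and queried as labeled edges. The toy entities and relation names below are invented purely for illustration.

```python
# Toy knowledge graph with typed relations of the kind described above.
import networkx as nx

KG = nx.MultiDiGraph()
KG.add_edge("tweet_001", "topic:vaccines", relation="mentions_topic")
KG.add_edge("tweet_001", "2021-03-15", relation="posted_on")
KG.add_edge("event:statewide_mask_order", "2021-03-15", relation="occurred_on")
KG.add_edge("tweet_002", "topic:school_closures", relation="mentions_topic")

# Example query: which tweets share a date with a policy event?
for event, date, data in KG.out_edges(data=True):
    if data["relation"] == "occurred_on":
        tweets = [u for u, v, d in KG.in_edges(date, data=True) if d["relation"] == "posted_on"]
        print(event, "->", date, "<-", tweets)
```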
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.04932v1-abstract-full').style.display = 'none'; document.getElementById('2110.04932v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.04840">arXiv:2110.04840</a> <span> [<a href="https://arxiv.org/pdf/2110.04840">pdf</a>, <a href="https://arxiv.org/format/2110.04840">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Numerical Analysis">math.NA</span> </div> </div> <p class="title is-5 mathjax"> Heavy Ball Neural Ordinary Differential Equations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Xia%2C+H">Hedi Xia</a>, <a href="/search/cs?searchtype=author&query=Suliafu%2C+V">Vai Suliafu</a>, <a href="/search/cs?searchtype=author&query=Ji%2C+H">Hangjie Ji</a>, <a href="/search/cs?searchtype=author&query=Nguyen%2C+T+M">Tan M. Nguyen</a>, <a href="/search/cs?searchtype=author&query=Bertozzi%2C+A+L">Andrea L. Bertozzi</a>, <a href="/search/cs?searchtype=author&query=Osher%2C+S+J">Stanley J. Osher</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+B">Bao Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.04840v1-abstract-short" style="display: inline;"> We propose heavy ball neural ordinary differential equations (HBNODEs), leveraging the continuous limit of the classical momentum accelerated gradient descent, to improve neural ODEs (NODEs) training and inference. HBNODEs have two properties that imply practical advantages over NODEs: (i) The adjoint state of an HBNODE also satisfies an HBNODE, accelerating both forward and backward ODE solvers,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.04840v1-abstract-full').style.display = 'inline'; document.getElementById('2110.04840v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.04840v1-abstract-full" style="display: none;"> We propose heavy ball neural ordinary differential equations (HBNODEs), leveraging the continuous limit of the classical momentum accelerated gradient descent, to improve neural ODEs (NODEs) training and inference. HBNODEs have two properties that imply practical advantages over NODEs: (i) The adjoint state of an HBNODE also satisfies an HBNODE, accelerating both forward and backward ODE solvers, thus significantly reducing the number of function evaluations (NFEs) and improving the utility of the trained models. (ii) The spectrum of HBNODEs is well structured, enabling effective learning of long-term dependencies from complex sequential data. 
arXiv:2107.01713 [pdf, other] (https://arxiv.org/abs/2107.01713)
Subjects: cs.SI; math.DS; nlin.AO; physics.soc-ph; q-bio.PE
A Multilayer Network Model of the Coevolution of the Spread of a Disease and Competing Opinions
Authors: Kaiyan Peng, Zheng Lu, Vanessa Lin, Michael R. Lindstrom, Christian Parkinson, Chuntian Wang, Andrea L. Bertozzi, Mason A. Porter
Abstract: During the COVID-19 pandemic, conflicting opinions on physical distancing swept across social media, affecting both human behavior and the spread of COVID-19. Inspired by such phenomena, we construct a two-layer multiplex network for the coupled spread of a disease and conflicting opinions. We model each process as a contagion. On one layer, we consider the concurrent evolution of two opinions -- pro-physical-distancing and anti-physical-distancing -- that compete with each other and have mutual immunity to each other. The disease evolves on the other layer, and individuals are less likely (respectively, more likely) to become infected when they adopt the pro-physical-distancing (respectively, anti-physical-distancing) opinion. We develop approximations of mean-field type by generalizing monolayer pair approximations to multilayer networks; these approximations agree well with Monte Carlo simulations for a broad range of parameters and several network structures. Through numerical simulations, we illustrate the influence of opinion dynamics on the spread of the disease from complex interactions both between the two conflicting opinions and between the opinions and the disease. We find that lengthening the duration that individuals hold an opinion may help suppress disease transmission, and we demonstrate that increasing the cross-layer correlations or intra-layer correlations of node degrees may lead to fewer individuals becoming infected with the disease.
Submitted 4 July, 2021; originally announced July 2021.
MSC Class: 91D30; 92D30; 37N25
arXiv:2105.10650 [pdf] (https://arxiv.org/abs/2105.10650)
Subjects: physics.med-ph; cs.CV; eess.IV
Post-Radiotherapy PET Image Outcome Prediction by Deep Learning under Biological Model Guidance: A Feasibility Study of Oropharyngeal Cancer Application
Authors: Hangjie Ji, Kyle Lafata, Yvonne Mowery, David Brizel, Andrea L. Bertozzi, Fang-Fang Yin, Chunhao Wang
Abstract: This paper develops a method of biologically guided deep learning for post-radiation FDG-PET image outcome prediction based on pre-radiation images and radiotherapy dose information. Based on the classic reaction-diffusion mechanism, a novel biological model was proposed using a partial differential equation that incorporates the spatial radiation dose distribution as a patient-specific treatment-information variable. A 7-layer encoder-decoder-based convolutional neural network (CNN) was designed and trained to learn the proposed biological model. As such, the model could generate post-radiation FDG-PET image outcome predictions with a possible time-series transition from pre-radiotherapy image states to post-radiotherapy states. The proposed method was developed using 64 oropharyngeal patients with paired FDG-PET studies before and after 20 Gy delivery (2 Gy/daily fraction) by IMRT. In a two-branch deep learning execution, the proposed CNN learns specific terms of the biological model from paired FDG-PET images and the spatial dose distribution in one branch, and the biological model generates the post-20 Gy FDG-PET image prediction in the other branch. The proposed method successfully generated post-20 Gy FDG-PET image outcome predictions with breakdown illustrations of the biological model components. Time-series FDG-PET image predictions were generated to demonstrate the feasibility of disease-response rendering. The developed biologically guided deep learning method achieved post-20 Gy FDG-PET image outcome predictions in good agreement with ground-truth results. With the breakdown of biological modeling components, the outcome image predictions could be used in adaptive radiotherapy decision-making to optimize personalized plans for the best outcome in the future.
Submitted 22 May, 2021; originally announced May 2021.
Comments: 26 pages, 5 figures
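The reaction-diffusion mechanism referenced above can be illustrated with a toy explicit finite-difference update of the form u_t = D Δu + ρ u(1 - u) - α d(x) u, where d(x) stands in for the spatial dose distribution. The coefficients, grid, and the specific dose-coupling term are illustrative placeholders, not the calibrated biological model.

```python
# Toy reaction-diffusion update: u_t = D*Laplacian(u) + rho*u*(1 - u) - alpha*dose*u
import numpy as np

def laplacian(u):
    return (np.roll(u, 1, 0) + np.roll(u, -1, 0) +
            np.roll(u, 1, 1) + np.roll(u, -1, 1) - 4 * u)

dt, D, rho, alpha = 0.1, 0.05, 0.2, 0.03
u = np.zeros((64, 64)); u[24:40, 24:40] = 0.8          # stand-in pre-treatment PET uptake
dose = np.zeros((64, 64)); dose[20:44, 20:44] = 1.0    # stand-in spatial dose distribution

for _ in range(200):                                    # forward-Euler time stepping
    u = u + dt * (D * laplacian(u) + rho * u * (1 - u) - alpha * dose * u)

print("mean uptake after simulated treatment:", round(u.mean(), 4))
```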
arXiv:2007.12809 [pdf, other] (https://arxiv.org/abs/2007.12809)
Subjects: stat.ML; cs.LG
DOI: 10.1088/1361-6420/ac1e80
Posterior Consistency of Semi-Supervised Regression on Graphs
Authors: Andrea L. Bertozzi, Bamdad Hosseini, Hao Li, Kevin Miller, Andrew M. Stuart
Abstract: Graph-based semi-supervised regression (SSR) is the problem of estimating the value of a function on a weighted graph from its values (labels) on a small subset of the vertices. This paper is concerned with the consistency of SSR in the context of classification, in the setting where the labels have small noise and the underlying graph weighting is consistent with well-clustered nodes. We present a Bayesian formulation of SSR in which the weighted graph defines a Gaussian prior, using a graph Laplacian, and the labeled data defines a likelihood. We analyze the rate of contraction of the posterior measure around the ground truth in terms of parameters that quantify the small label error and inherent clustering in the graph. We obtain bounds on the rates of contraction and illustrate their sharpness through numerical experiments. The analysis also gives insight into the choice of hyperparameters that enter the definition of the prior.
Submitted 24 March, 2021; v1 submitted 24 July, 2020; originally announced July 2020.
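The Bayesian formulation described in the abstract can be sketched directly: a Gaussian prior with precision built from a graph Laplacian plus a shift, and a Gaussian likelihood on a few noisy labels. The toy graph and the hyperparameters tau and gamma below are assumptions for illustration.

```python
# Sketch of graph-based Bayesian semi-supervised regression with a Laplacian prior.
import numpy as np
import networkx as nx

G = nx.planted_partition_graph(2, 50, 0.2, 0.01, seed=0)   # well-clustered toy graph
L = nx.laplacian_matrix(G).toarray().astype(float)
n = L.shape[0]

tau, gamma = 0.1, 0.05
prior_precision = L + tau * np.eye(n)        # Gaussian prior N(0, (L + tau I)^{-1})

truth = np.array([1.0] * 50 + [-1.0] * 50)   # cluster indicator as ground truth
rng = np.random.default_rng(0)
labeled = rng.choice(n, 8, replace=False)
labels = truth[labeled] + gamma * rng.normal(size=len(labeled))   # small label noise

# Posterior for a Gaussian likelihood on the labeled vertices.
H = np.zeros((len(labeled), n)); H[np.arange(len(labeled)), labeled] = 1.0
post_precision = prior_precision + H.T @ H / gamma**2
mean = np.linalg.solve(post_precision, H.T @ labels / gamma**2)

print("sign agreement with ground truth:", (np.sign(mean) == np.sign(truth)).mean())
```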
arXiv:2007.11126 [pdf, other] (https://arxiv.org/abs/2007.11126)
Subjects: stat.ML; cs.LG
Efficient Graph-Based Active Learning with Probit Likelihood via Gaussian Approximations
Authors: Kevin Miller, Hao Li, Andrea L. Bertozzi
Abstract: We present a novel adaptation of active learning to graph-based semi-supervised learning (SSL) under non-Gaussian Bayesian models. We present an approximation of non-Gaussian distributions to adapt previously Gaussian-based acquisition functions to these more general cases. We develop an efficient rank-one update for applying "look-ahead" based methods as well as model retraining. We also introduce a novel "model change" acquisition function based on these approximations that further expands the available collection of active learning acquisition functions for such methods.
Submitted 21 July, 2020; originally announced July 2020.
Comments: Accepted in ICML Workshop on Real World Experiment Design and Active Learning 2020
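The rank-one update that makes look-ahead acquisition cheap is, in the Gaussian case, the Sherman-Morrison identity; the generic check below compares it against a direct refit. The covariance matrix and the noise variance gamma2 are arbitrary stand-ins.

```python
# Look-ahead posterior covariance after adding one labeled point via a rank-one update.
import numpy as np

rng = np.random.default_rng(0)
n, gamma2 = 200, 0.25
A = rng.normal(size=(n, n))
C = np.linalg.inv(A @ A.T + np.eye(n))         # current posterior covariance (stand-in)

k = 17                                         # candidate point to "look ahead" on
ck = C[:, k]
C_lookahead = C - np.outer(ck, ck) / (gamma2 + C[k, k])   # Sherman-Morrison downdate

# Check against the direct (expensive) recomputation.
e_k = np.zeros(n); e_k[k] = 1.0
C_direct = np.linalg.inv(np.linalg.inv(C) + np.outer(e_k, e_k) / gamma2)
print("max deviation from direct refit:", np.abs(C_lookahead - C_direct).max())
```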
arXiv:2006.06919 [pdf, other] (https://arxiv.org/abs/2006.06919)
Subjects: cs.LG; math.DS; stat.ML
MomentumRNN: Integrating Momentum into Recurrent Neural Networks
Authors: Tan M. Nguyen, Richard G. Baraniuk, Andrea L. Bertozzi, Stanley J. Osher, Bao Wang
Abstract: Designing deep neural networks is an art that often involves an expensive search over candidate architectures. To overcome this for recurrent neural nets (RNNs), we establish a connection between the hidden-state dynamics in an RNN and gradient descent (GD). We then integrate momentum into this framework and propose a new family of RNNs, called MomentumRNNs. We theoretically prove and numerically demonstrate that MomentumRNNs alleviate the vanishing gradient issue in training RNNs. We study the momentum long short-term memory (MomentumLSTM) and verify its advantages in convergence speed and accuracy over its LSTM counterpart across a variety of benchmarks. We also demonstrate that MomentumRNN is applicable to many types of recurrent cells, including those in state-of-the-art orthogonal RNNs. Finally, we show that other advanced momentum-based optimization methods, such as Adam and Nesterov accelerated gradients with a restart, can be easily incorporated into the MomentumRNN framework for designing new recurrent cells with even better performance. The code is available at https://github.com/minhtannguyen/MomentumRNN.
Submitted 11 October, 2020; v1 submitted 11 June, 2020; originally announced June 2020.
Comments: 21 pages, 11 figures. Accepted for publication at Advances in Neural Information Processing Systems (NeurIPS) 2020
MSC Class: 68T07. ACM Class: I.2
Journal ref: Advances in Neural Information Processing Systems (NeurIPS) 2020
arXiv:2003.00631 [pdf, other] (https://arxiv.org/abs/2003.00631)
Subjects: cs.LG; cs.AI; stat.ML
Sparsity Meets Robustness: Channel Pruning for the Feynman-Kac Formalism Principled Robust Deep Neural Nets
Authors: Thu Dinh, Bao Wang, Andrea L. Bertozzi, Stanley J. Osher
Abstract: Deep neural net (DNN) compression is crucial for adaptation to mobile devices. Though many successful algorithms exist to compress naturally trained DNNs, developing efficient and stable compression algorithms for robustly trained DNNs remains widely open. In this paper, we focus on a co-design of efficient DNN compression algorithms and sparse neural architectures for robust and accurate deep learning. Such a co-design enables us to advance the goal of accommodating both sparsity and robustness. With this objective in mind, we leverage relaxed augmented-Lagrangian-based algorithms to prune the weights of adversarially trained DNNs at both structured and unstructured levels. Using a Feynman-Kac formalism principled robust and sparse DNN, we can at least double the channel sparsity of the adversarially trained ResNet20 for CIFAR10 classification while improving the natural accuracy by 8.69% and the robust accuracy under the benchmark 20 iterations of the IFGSM attack by 5.42%. The code is available at https://github.com/BaoWangMath/rvsm-rgsm-admm.
Submitted 1 March, 2020; originally announced March 2020.
Comments: 16 pages, 7 figures
MSC Class: 68T01
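As a much-simplified stand-in for the structured (channel-level) sparsity discussed above, the sketch below zeroes out whole convolution output channels by their l1 norms. This one-shot magnitude rule is not the paper's relaxed augmented-Lagrangian training; it only illustrates what "channel sparsity" measures.

```python
# Simplified structured pruning: zero out output channels with the smallest l1 norms.
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(32, 16, 3, 3))              # conv weights: (out_ch, in_ch, k, k)

def prune_channels(W, sparsity=0.5):
    norms = np.abs(W).sum(axis=(1, 2, 3))        # l1 norm per output channel
    drop = np.argsort(norms)[:int(sparsity * W.shape[0])]
    Wp = W.copy(); Wp[drop] = 0.0
    return Wp, drop

Wp, dropped = prune_channels(W, sparsity=0.5)
print("channel sparsity:", (np.abs(Wp).sum(axis=(1, 2, 3)) == 0).mean())
```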
arXiv:2002.10583 [pdf, other] (https://arxiv.org/abs/2002.10583)
Subjects: cs.LG; cs.NE; stat.ML
Scheduled Restart Momentum for Accelerated Stochastic Gradient Descent
Authors: Bao Wang, Tan M. Nguyen, Andrea L. Bertozzi, Richard G. Baraniuk, Stanley J. Osher
Abstract: Stochastic gradient descent (SGD) with constant momentum and its variants, such as Adam, are the optimization algorithms of choice for training deep neural networks (DNNs). Since DNN training is incredibly computationally expensive, there is great interest in speeding up convergence. Nesterov accelerated gradient (NAG) improves the convergence rate of gradient descent (GD) for convex optimization using a specially designed momentum; however, it accumulates error when an inexact gradient is used (such as in SGD), slowing convergence at best and diverging at worst. In this paper, we propose Scheduled Restart SGD (SRSGD), a new NAG-style scheme for training DNNs. SRSGD replaces the constant momentum in SGD with the increasing momentum in NAG but stabilizes the iterations by resetting the momentum to zero according to a schedule. Using a variety of models and benchmarks for image classification, we demonstrate that, in training DNNs, SRSGD significantly improves convergence and generalization; for instance, in training ResNet200 for ImageNet classification, SRSGD achieves an error rate of 20.93% versus the benchmark of 22.13%. These improvements become more significant as the network grows deeper. Furthermore, on both CIFAR and ImageNet, SRSGD reaches similar or even better error rates with significantly fewer training epochs compared to the SGD baseline.
Submitted 26 April, 2020; v1 submitted 24 February, 2020; originally announced February 2020.
Comments: 35 pages, 16 figures, 18 tables
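The scheme described above, increasing Nesterov-style momentum that is periodically reset to zero, can be sketched on a toy least-squares problem. The restart frequency F, step size, and the textbook momentum sequence below are illustrative choices, not necessarily the schedules used in the paper.

```python
# Sketch of a scheduled-restart NAG-style iteration on a toy least-squares problem.
import numpy as np

rng = np.random.default_rng(0)
A, b = rng.normal(size=(100, 20)), rng.normal(size=100)
grad = lambda w: A.T @ (A @ w - b) / len(b)

w = np.zeros(20); w_prev = w.copy()
lr, F, t = 0.05, 30, 1.0
for k in range(300):
    if k % F == 0:
        t, w_prev = 1.0, w.copy()          # scheduled restart: momentum resets to zero
    t_next = 0.5 * (1 + np.sqrt(1 + 4 * t * t))
    momentum = (t - 1) / t_next            # Nesterov's increasing momentum coefficient
    y = w + momentum * (w - w_prev)        # look-ahead point
    w_prev, w, t = w, y - lr * grad(y), t_next

print("final loss:", 0.5 * np.mean((A @ w - b) ** 2))
```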
arXiv:1902.05113 [pdf, other] (https://arxiv.org/abs/1902.05113)
Subjects: cs.LG; math.OC; stat.ML
A Study on Graph-Structured Recurrent Neural Networks and Sparsification with Application to Epidemic Forecasting
Authors: Zhijian Li, Xiyang Luo, Bao Wang, Andrea L. Bertozzi, Jack Xin
Abstract: We study epidemic forecasting on real-world health data by a graph-structured recurrent neural network (GSRNN). We achieve state-of-the-art forecasting accuracy on the benchmark CDC dataset. To improve model efficiency, we sparsify the network weights via a transformed-$\ell_1$ penalty and maintain prediction accuracy at the same level with 70% of the network weights being zero.
Submitted 13 February, 2019; originally announced February 2019.
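The transformed-$\ell_1$ penalty mentioned above is commonly written as rho_a(w) = (a + 1)|w| / (a + |w|), which interpolates between an l0-like penalty (small a) and the l1 norm (large a). A minimal evaluation, with the parameter a treated as a free choice:

```python
# Transformed-l1 penalty: rho_a(w) = (a + 1)|w| / (a + |w|).
import numpy as np

def transformed_l1(w, a=1.0):
    return (a + 1) * np.abs(w) / (a + np.abs(w))

w = np.linspace(-3, 3, 7)
print("weights:      ", w)
print("TL1, a=1:     ", np.round(transformed_l1(w, 1.0), 3))    # close to counting nonzeros
print("TL1, a=100:   ", np.round(transformed_l1(w, 100.0), 3))  # close to |w| up to scale
```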
arXiv:1811.06321 [pdf, other] (https://arxiv.org/abs/1811.06321)
Subjects: cs.SI; eess.SP; nlin.AO; physics.soc-ph; stat.ML
Multivariate Spatiotemporal Hawkes Processes and Network Reconstruction
Authors: Baichuan Yuan, Hao Li, Andrea L. Bertozzi, P. Jeffrey Brantingham, Mason A. Porter
Abstract: There is often latent network structure in spatial and temporal data, and the tools of network analysis can yield fascinating insights into such data. In this paper, we develop a nonparametric method for network reconstruction from spatiotemporal data sets using multivariate Hawkes processes. In contrast to prior work on network reconstruction with point-process models, which has often focused on exclusively temporal information, our approach uses both temporal and spatial information and does not assume a specific parametric form of network dynamics. This leads to an effective way of recovering an underlying network. We illustrate our approach using both synthetic networks and networks constructed from real-world data sets (a location-based social media network, a narrative of crime events, and violent gang crimes). Our results demonstrate that, in comparison to using only temporal data, our spatiotemporal approach yields improved network reconstruction, providing a basis for meaningful subsequent analysis -- such as community structure and motif analysis -- of the reconstructed networks.
Submitted 15 November, 2018; originally announced November 2018.
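A multivariate spatiotemporal Hawkes intensity has the generic form lambda_u(t, x) = mu_u + sum over past events of K[u_i, u] g(t - t_i) h(x - x_i). The sketch below evaluates such an intensity with exponential-in-time and Gaussian-in-space kernels, which are common parametric choices; the paper estimates the triggering structure nonparametrically, so everything here is illustrative.

```python
# Generic multivariate spatiotemporal Hawkes intensity with parametric kernels.
import numpy as np

# Past events: (time, x, y, node index u)
events = np.array([[0.5, 0.1, 0.2, 0],
                   [1.2, 0.4, 0.1, 1],
                   [2.0, 0.3, 0.3, 0]])
mu = np.array([0.2, 0.1])                    # background rates per node
K = np.array([[0.5, 0.2],                    # K[u_i, u]: excitation from node u_i to node u
              [0.1, 0.4]])
omega, sigma = 1.0, 0.2                      # temporal decay and spatial bandwidth

def intensity(t, x, y, u):
    lam = mu[u]
    for ti, xi, yi, ui in events:
        if ti < t:
            g = omega * np.exp(-omega * (t - ti))                       # temporal kernel
            h = np.exp(-((x - xi)**2 + (y - yi)**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)
            lam += K[int(ui), u] * g * h
    return lam

print("lambda_0(t=2.5, x=0.2, y=0.2):", round(intensity(2.5, 0.2, 0.2, 0), 4))
```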
arXiv:1809.08516 [pdf, other] (https://arxiv.org/abs/1809.08516)
Subjects: cs.LG; math.NA; stat.ML
Adversarial Defense via Data Dependent Activation Function and Total Variation Minimization
Authors: Bao Wang, Alex T. Lin, Wei Zhu, Penghang Yin, Andrea L. Bertozzi, Stanley J. Osher
Abstract: We improve the robustness of deep neural nets (DNNs) to adversarial attacks by using an interpolating function as the output activation. This data-dependent activation remarkably improves both the generalization and robustness of DNNs. On the CIFAR10 benchmark, we raise the robust accuracy of the adversarially trained ResNet20 from roughly 46% to roughly 69% under the state-of-the-art Iterative Fast Gradient Sign Method (IFGSM) adversarial attack. When we combine this data-dependent activation with total variation minimization on adversarial images and training data augmentation, we achieve an improvement in robust accuracy of 38.9% for ResNet56 under the strongest IFGSM attack. Furthermore, we provide an intuitive explanation of our defense by analyzing the geometry of the feature space.
Submitted 29 April, 2020; v1 submitted 22 September, 2018; originally announced September 2018.
Comments: 17 pages, 6 figures
MSC Class: 68Pxx
Journal ref: Inverse Problems and Imaging, 2020
arXiv:1806.02485 [pdf, other] (https://arxiv.org/abs/1806.02485)
Subjects: cs.SI; cond-mat.stat-mech; math.ST; nlin.AO; stat.ML
DOI: 10.1007/s00332-019-09541-8
Stochastic Block Models are a Discrete Surface Tension
Authors: Zachary M. Boyd, Mason A. Porter, Andrea L. Bertozzi
Abstract: Networks, which represent agents and interactions between them, arise in myriad applications throughout the sciences, engineering, and even the humanities. To understand large-scale structure in a network, a common task is to cluster a network's nodes into sets called "communities", such that there are dense connections within communities but sparse connections between them. A popular and statistically principled method to perform such clustering is to use a family of generative models known as stochastic block models (SBMs). In this paper, we show that maximum likelihood estimation in an SBM is a network analog of a well-known continuum surface-tension problem that arises from an application in metallurgy. To illustrate the utility of this relationship, we implement network analogs of three surface-tension algorithms, with which we successfully recover planted community structure in synthetic networks and which yield fascinating insights on empirical networks that we construct from hyperspectral videos.
Submitted 24 March, 2019; v1 submitted 6 June, 2018; originally announced June 2018.
Comments: to appear in Journal of Nonlinear Science
MSC Class: 65K10; 49M20; 35Q56; 62H30; 91C20; 91D30; 94C15
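The maximum likelihood objective that the paper relates to a discrete surface tension can be evaluated directly for a candidate partition; below is the profile log-likelihood of a Bernoulli SBM on a toy planted-partition graph. The partition variable is what a clustering algorithm would optimize over.

```python
# Profile log-likelihood of a Bernoulli SBM for a given node partition.
import numpy as np
import networkx as nx

G = nx.planted_partition_graph(2, 40, 0.25, 0.02, seed=0)
A = nx.to_numpy_array(G)
labels = np.array([0] * 40 + [1] * 40)          # candidate community assignment

def sbm_profile_loglik(A, labels):
    ll = 0.0
    for r in np.unique(labels):
        for s in np.unique(labels):
            if s < r:
                continue
            Ars = A[np.ix_(labels == r, labels == s)]
            pairs = Ars.size if r != s else Ars.shape[0] * (Ars.shape[0] - 1) / 2
            edges = Ars.sum() if r != s else Ars.sum() / 2
            p = edges / pairs                    # block-wise MLE of the connection probability
            if 0 < p < 1:                        # 0*log(0) terms contribute nothing
                ll += pairs * (p * np.log(p) + (1 - p) * np.log(1 - p))
    return ll

print("planted partition:", round(sbm_profile_loglik(A, labels), 1))
print("random partition :", round(sbm_profile_loglik(A, np.random.default_rng(0).permutation(labels)), 1))
```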
arXiv:1804.00684 [pdf, other] (https://arxiv.org/abs/1804.00684)
Subjects: cs.LG; math.NA; stat.ML
Title: Graph-Based Deep Modeling and Real Time Forecasting of Sparse Spatio-Temporal Data
Authors: Bao Wang, Xiyang Luo, Fangbo Zhang, Baichuan Yuan, Andrea L. Bertozzi, P. Jeffrey Brantingham
Abstract: We present a generic framework for spatio-temporal (ST) data modeling, analysis, and forecasting, with a special focus on data that is sparse in both space and time. Our multi-scaled framework is a seamless coupling of two major components: a self-exciting point process that models the macroscale statistical behaviors of the ST data and a graph structured recurrent neural network (GSRNN) to discover the microscale patterns of the ST data on the inferred graph. This novel deep neural network (DNN) incorporates the real time interactions of the graph nodes to enable more accurate real time forecasting. The effectiveness of our method is demonstrated on both crime and traffic forecasting.
Submitted 2 April, 2018; originally announced April 2018.
Comments: 9 pages, 19 figures
MSC Class: 65-06
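For readers unfamiliar with the macroscale component mentioned here, a self-exciting (Hawkes) point process has a conditional intensity in which each past event temporarily raises the rate of future events; a common exponential-kernel form (the paper's exact kernel and parameterization are not given in the abstract) is

$$ \lambda(t) \;=\; \mu + \sum_{t_i < t} \alpha\,\beta\, e^{-\beta (t - t_i)}, $$

with background rate $\mu$, branching ratio $\alpha$, and decay rate $\beta$.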
arXiv:1711.08833 [pdf, other] (https://arxiv.org/abs/1711.08833)
Subjects: cs.LG; math.NA; stat.ML
Title: Deep Learning for Real-Time Crime Forecasting and its Ternarization
Authors: Bao Wang, Penghang Yin, Andrea L. Bertozzi, P. Jeffrey Brantingham, Stanley J. Osher, Jack Xin
Abstract: Real-time crime forecasting is important. However, accurate prediction of when and where the next crime will happen is difficult. No known physical model provides a reasonable approximation to such a complex system. Historical crime data are sparse in both space and time, and the signal of interest is weak. In this work, we first present a proper representation of crime data. We then adapt the spatial-temporal residual network to the well-represented data to predict the distribution of crime in Los Angeles at the scale of hours in neighborhood-sized parcels. These experiments, as well as comparisons with several existing approaches to prediction, demonstrate the superiority of the proposed model in terms of accuracy. Finally, we present a ternarization technique to address the resource-consumption issue for deployment in the real world. This work is an extension of our short conference proceedings paper [Wang et al, arXiv:1707.03340].
Submitted 23 November, 2017; originally announced November 2017.
Comments: 14 pages, 7 figures
MSC Class: 62-07
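A minimal sketch of what a weight-ternarization step can look like, assuming a generic threshold-and-rescale scheme; the threshold rule and scaling below are illustrative choices, not the paper's specific method.

```python
import numpy as np

def ternarize(w, delta_ratio=0.7):
    """Illustrative weight ternarization: map weights to {-alpha, 0, +alpha}.

    Generic scheme (threshold = delta_ratio * mean|w|, alpha = mean magnitude
    of the surviving weights); the paper's actual ternarization may differ.
    """
    delta = delta_ratio * np.mean(np.abs(w))   # pruning threshold
    mask = np.abs(w) > delta                   # weights kept as +/- alpha
    alpha = np.abs(w[mask]).mean() if mask.any() else 0.0
    return alpha * np.sign(w) * mask

w = np.random.randn(4, 4)
print(ternarize(w))
```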
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.03340v1-abstract-full').style.display = 'none'; document.getElementById('1707.03340v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 6 figures, NOLTA, 2017</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T05 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1704.02955">arXiv:1704.02955</a> <span> [<a href="https://arxiv.org/pdf/1704.02955">pdf</a>, <a href="https://arxiv.org/format/1704.02955">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> </div> </div> <p class="title is-5 mathjax"> Unsupervised record matching with noisy and incomplete data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=van+Gennip%2C+Y">Yves van Gennip</a>, <a href="/search/cs?searchtype=author&query=Hunter%2C+B">Blake Hunter</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+A">Anna Ma</a>, <a href="/search/cs?searchtype=author&query=Moyer%2C+D">Daniel Moyer</a>, <a href="/search/cs?searchtype=author&query=de+Vera%2C+R">Ryan de Vera</a>, <a href="/search/cs?searchtype=author&query=Bertozzi%2C+A+L">Andrea L. Bertozzi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1704.02955v2-abstract-short" style="display: inline;"> We consider the problem of duplicate detection in noisy and incomplete data: given a large data set in which each record has multiple entries (attributes), detect which distinct records refer to the same real world entity. This task is complicated by noise (such as misspellings) and missing data, which can lead to records being different, despite referring to the same entity. Our method consists o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1704.02955v2-abstract-full').style.display = 'inline'; document.getElementById('1704.02955v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1704.02955v2-abstract-full" style="display: none;"> We consider the problem of duplicate detection in noisy and incomplete data: given a large data set in which each record has multiple entries (attributes), detect which distinct records refer to the same real world entity. This task is complicated by noise (such as misspellings) and missing data, which can lead to records being different, despite referring to the same entity. Our method consists of three main steps: creating a similarity score between records, grouping records together into "unique entities", and refining the groups. We compare various methods for creating similarity scores between noisy records, considering different combinations of string matching, term frequency-inverse document frequency methods, and n-gram techniques. 
arXiv:1704.02955 [pdf, other] (https://arxiv.org/abs/1704.02955)
Subjects: cs.DB
Title: Unsupervised record matching with noisy and incomplete data
Authors: Yves van Gennip, Blake Hunter, Anna Ma, Daniel Moyer, Ryan de Vera, Andrea L. Bertozzi
Abstract: We consider the problem of duplicate detection in noisy and incomplete data: given a large data set in which each record has multiple entries (attributes), detect which distinct records refer to the same real world entity. This task is complicated by noise (such as misspellings) and missing data, which can lead to records being different, despite referring to the same entity. Our method consists of three main steps: creating a similarity score between records, grouping records together into "unique entities", and refining the groups. We compare various methods for creating similarity scores between noisy records, considering different combinations of string matching, term frequency-inverse document frequency methods, and n-gram techniques. In particular, we introduce a vectorized soft term frequency-inverse document frequency method, with an optional refinement step. We also discuss two methods to deal with missing data in computing similarity scores. We test our method on the Los Angeles Police Department Field Interview Card data set, the Cora Citation Matching data set, and two sets of restaurant review data. The results show that the methods that use words as the basic units are preferable to those that use 3-grams. Moreover, in some (but certainly not all) parameter ranges soft term frequency-inverse document frequency methods can outperform the standard term frequency-inverse document frequency method. The results also confirm that our method for automatically determining the number of groups typically works well and allows for accurate results in the absence of a priori knowledge of the number of unique entities in the data set.
Submitted 30 April, 2018; v1 submitted 10 April, 2017; originally announced April 2017.
Comments: 24 pages, 17 figures; this second version has various significant updates compared to version 1 as a result of the peer review process prior to journal publication; we thank the reviewers for their comments
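A rough illustration of the TF-IDF similarity-scoring step with scikit-learn. This is the plain TF-IDF baseline, not the vectorized soft TF-IDF variant the paper introduces, and the records below are invented:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical records; in practice each record concatenates its attributes.
records = [
    "john smith 123 main st los angeles",
    "jon smith 123 main street la",
    "maria garcia 45 ocean ave long beach",
]

tfidf = TfidfVectorizer(analyzer="word").fit_transform(records)
scores = cosine_similarity(tfidf)   # pairwise record-to-record similarity
print(scores.round(2))
```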
arXiv:1703.08816 [pdf, other] (https://arxiv.org/abs/1703.08816)
Subjects: cs.LG; stat.ML
Title: Uncertainty quantification in graph-based classification of high dimensional data
Authors: Andrea L. Bertozzi, Xiyang Luo, Andrew M. Stuart, Konstantinos C. Zygalakis
Abstract: Classification of high dimensional data finds wide-ranging applications. In many of these applications equipping the resulting classification with a measure of uncertainty may be as important as the classification itself. In this paper we introduce, develop algorithms for, and investigate the properties of, a variety of Bayesian models for the task of binary classification; via the posterior distribution on the classification labels, these methods automatically give measures of uncertainty. The methods are all based around the graph formulation of semi-supervised learning. We provide a unified framework which brings together a variety of methods which have been introduced in different communities within the mathematical sciences. We study probit classification in the graph-based setting, generalize the level-set method for Bayesian inverse problems to the classification setting, and generalize the Ginzburg-Landau optimization-based classifier to a Bayesian setting; we also show that the probit and level set approaches are natural relaxations of the harmonic function approach introduced in [Zhu et al 2003]. We introduce efficient numerical methods, suited to large data-sets, for both MCMC-based sampling as well as gradient-based MAP estimation. Through numerical experiments we study classification accuracy and uncertainty quantification for our models; these experiments showcase a suite of datasets commonly used to evaluate graph-based semi-supervised learning algorithms.
Submitted 8 February, 2018; v1 submitted 26 March, 2017; originally announced March 2017.
Comments: 33 pages, 14 figures
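All of the models referenced in this entry are built on a weighted similarity graph over the data points; a minimal sketch of a standard Gaussian-kernel graph Laplacian construction (the kernel width, dense construction, and random data below are placeholders; the paper's experiments may use nearest-neighbor sparsification or a normalized Laplacian instead):

```python
import numpy as np

def graph_laplacian(X, sigma=1.0):
    """Dense Gaussian-kernel weight matrix and unnormalized graph Laplacian."""
    sq_dists = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
    W = np.exp(-sq_dists / (2 * sigma ** 2))
    np.fill_diagonal(W, 0.0)
    return np.diag(W.sum(1)) - W

X = np.random.randn(50, 3)          # stand-in data
L = graph_laplacian(X)
print(L.shape, np.allclose(L.sum(1), 0))   # rows of L sum to zero
```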
arXiv:1701.01505 [pdf] (https://arxiv.org/abs/1701.01505)
Subjects: cs.CL
DOI: 10.1186/s40163-017-0074-0
Title: Crime Topic Modeling
Authors: Da Kuang, P. Jeffrey Brantingham, Andrea L. Bertozzi
Abstract: The classification of crime into discrete categories entails a massive loss of information. Crimes emerge out of a complex mix of behaviors and situations, yet most of these details cannot be captured by singular crime type labels. This information loss impacts our ability to not only understand the causes of crime, but also how to develop optimal crime prevention strategies. We apply machine learning methods to short narrative text descriptions accompanying crime records with the goal of discovering ecologically more meaningful latent crime classes. We term these latent classes "crime topics" in reference to text-based topic modeling methods that produce them. We use topic distributions to measure clustering among formally recognized crime types. Crime topics replicate broad distinctions between violent and property crime, but also reveal nuances linked to target characteristics, situational conditions and the tools and methods of attack. Formal crime types are not discrete in topic space. Rather, crime types are distributed across a range of crime topics. Similarly, individual crime topics are distributed across a range of formal crime types. Key ecological groups include identity theft, shoplifting, burglary and theft, car crimes and vandalism, criminal threats and confidence crimes, and violent crimes. Though not a replacement for formal legal crime classifications, crime topics provide a unique window into the heterogeneous causal processes underlying crime.
Submitted 6 August, 2018; v1 submitted 5 January, 2017; originally announced January 2017.
Comments: 47 pages, 4 tables, 7 figures
Journal ref: Kuang, D., Brantingham, P. J., & Bertozzi, A. L. (2017). Crime topic modeling. Crime Science, 6(1), 12
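A toy illustration of extracting latent "topics" from short narrative text. NMF is used here as a generic stand-in for the paper's topic-modeling step, and the narratives and topic count are invented:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF

narratives = [
    "suspect broke window and took laptop from vehicle",
    "victim purse snatched while walking on sidewalk",
    "unknown suspect entered residence through rear door and took jewelry",
    "suspect threatened victim with knife and demanded wallet",
]

vec = TfidfVectorizer(stop_words="english")
X = vec.fit_transform(narratives)
terms = vec.get_feature_names_out()

model = NMF(n_components=2, init="nndsvda", random_state=0).fit(X)
for k, topic in enumerate(model.components_):
    top = [terms[i] for i in topic.argsort()[-4:][::-1]]   # top words per topic
    print(f"topic {k}: {top}")
```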
arXiv:1604.08182 [pdf, other] (https://arxiv.org/abs/1604.08182)
Subjects: cs.CV
DOI: 10.1109/TGRS.2017.2654486
Title: Unsupervised Classification in Hyperspectral Imagery with Nonlocal Total Variation and Primal-Dual Hybrid Gradient Algorithm
Authors: Wei Zhu, Victoria Chayes, Alexandre Tiard, Stephanie Sanchez, Devin Dahlberg, Andrea L. Bertozzi, Stanley Osher, Dominique Zosso, Da Kuang
Abstract: In this paper, a graph-based nonlocal total variation method (NLTV) is proposed for unsupervised classification of hyperspectral images (HSI). The variational problem is solved by the primal-dual hybrid gradient (PDHG) algorithm. By squaring the labeling function and using a stable simplex clustering routine, an unsupervised clustering method with random initialization can be implemented. The effectiveness of this proposed algorithm is illustrated on both synthetic and real-world HSI, and numerical results show that the proposed algorithm outperforms other standard unsupervised clustering methods such as spherical K-means, nonnegative matrix factorization (NMF), and the graph-based Merriman-Bence-Osher (MBO) scheme.
Submitted 13 February, 2017; v1 submitted 27 April, 2016; originally announced April 2016.
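For reference, the generic primal-dual hybrid gradient (Chambolle-Pock) iteration for a saddle-point problem $\min_x \max_y \langle Kx, y\rangle + G(x) - F^*(y)$; the specific NLTV energy, operator $K$, and proximal maps used in the paper determine what each step computes:

$$
\begin{aligned}
y^{k+1} &= \mathrm{prox}_{\sigma F^*}\!\big(y^{k} + \sigma K \bar{x}^{k}\big),\\
x^{k+1} &= \mathrm{prox}_{\tau G}\!\big(x^{k} - \tau K^{*} y^{k+1}\big),\\
\bar{x}^{k+1} &= x^{k+1} + \theta\,\big(x^{k+1} - x^{k}\big).
\end{aligned}
$$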
arXiv:1510.08106 [pdf, other] (https://arxiv.org/abs/1510.08106)
Subjects: physics.soc-ph; cs.SI
DOI: 10.1103/PhysRevE.93.022308
Title: Growth and Containment of a Hierarchical Criminal Network
Authors: Charles Z. Marshak, M. Puck Rombach, Andrea L. Bertozzi, Maria R. D'Orsogna
Abstract: We model the hierarchical evolution of an organized criminal network via antagonistic recruitment and pursuit processes. Within the recruitment phase, a criminal kingpin enlists new members into the network, who in turn seek out other affiliates. New recruits are linked to established criminals according to a probability distribution that depends on the current network structure. At the same time, law enforcement agents attempt to dismantle the growing organization using pursuit strategies that initiate on the lower level nodes and that unfold as self-avoiding random walks. The global details of the organization are unknown to law enforcement, who must explore the hierarchy node by node. We halt the pursuit when certain local criteria of the network are uncovered, encoding if and when an arrest is made; the criminal network is assumed to be eradicated if the kingpin is arrested. We first analyze recruitment and study the large scale properties of the growing network; later we add pursuit and use numerical simulations to study the eradication probability in the case of three pursuit strategies, the time to first eradication and related costs. Within the context of this model, we find that eradication becomes increasingly costly as the network increases in size and that the optimal way of arresting the kingpin is to intervene at the early stages of network formation. We discuss our results in the context of dark network disruption and their implications on possible law enforcement strategies.
Submitted 15 January, 2016; v1 submitted 27 October, 2015; originally announced October 2015.
Comments: 16 pages, 11 figures; New title; Updated figures with color scheme better suited for colorblind readers and for gray scale printing
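A toy version of the recruitment phase: grow a hierarchy outward from a kingpin by attaching each new recruit to an existing member. The uniform attachment rule below is a placeholder; in the paper the attachment probability depends on the current network structure in a specific way not spelled out in the abstract:

```python
import random
import networkx as nx

def grow_recruitment_tree(n_recruits, seed=0):
    rng = random.Random(seed)
    G = nx.DiGraph()
    G.add_node(0)                               # node 0 is the kingpin
    for new in range(1, n_recruits + 1):
        recruiter = rng.choice(list(G.nodes))   # placeholder attachment rule
        G.add_edge(recruiter, new)              # recruiter -> new recruit
    return G

G = grow_recruitment_tree(50)
print(G.number_of_nodes(), G.number_of_edges())
```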
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1304.4679v1-abstract-full').style.display = 'none'; document.getElementById('1304.4679v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 April, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2013. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 62H30; 91C20; 91D30; 94C15 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1211.7180">arXiv:1211.7180</a> <span> [<a href="https://arxiv.org/pdf/1211.7180">pdf</a>, <a href="https://arxiv.org/format/1211.7180">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Analysis, Statistics and Probability">physics.data-an</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ICDMW.2012.72">10.1109/ICDMW.2012.72 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Multislice Modularity Optimization in Community Detection and Image Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hu%2C+H">Huiyi Hu</a>, <a href="/search/cs?searchtype=author&query=van+Gennip%2C+Y">Yves van Gennip</a>, <a href="/search/cs?searchtype=author&query=Hunter%2C+B">Blake Hunter</a>, <a href="/search/cs?searchtype=author&query=Porter%2C+M+A">Mason A. Porter</a>, <a href="/search/cs?searchtype=author&query=Bertozzi%2C+A+L">Andrea L. Bertozzi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1211.7180v1-abstract-short" style="display: inline;"> Because networks can be used to represent many complex systems, they have attracted considerable attention in physics, computer science, sociology, and many other disciplines. One of the most important areas of network science is the algorithmic detection of cohesive groups (i.e., "communities") of nodes. 
arXiv:1211.7180 [pdf, other] (https://arxiv.org/abs/1211.7180)
Subjects: cs.SI; cs.CV; physics.data-an; physics.soc-ph
DOI: 10.1109/ICDMW.2012.72
Title: Multislice Modularity Optimization in Community Detection and Image Segmentation
Authors: Huiyi Hu, Yves van Gennip, Blake Hunter, Mason A. Porter, Andrea L. Bertozzi
Abstract: Because networks can be used to represent many complex systems, they have attracted considerable attention in physics, computer science, sociology, and many other disciplines. One of the most important areas of network science is the algorithmic detection of cohesive groups (i.e., "communities") of nodes. In this paper, we algorithmically detect communities in social networks and image data by optimizing multislice modularity. A key advantage of modularity optimization is that it does not require prior knowledge of the number or sizes of communities, and it is capable of finding network partitions that are composed of communities of different sizes. By optimizing multislice modularity and subsequently calculating diagnostics on the resulting network partitions, it is thereby possible to obtain information about network structure across multiple system scales. We illustrate this method on data from both social networks and images, and we find that optimization of multislice modularity performs well on these two tasks without the need for extensive problem-specific adaptation. However, improving the computational speed of this method remains a challenging open problem.
Submitted 30 November, 2012; originally announced November 2012.
Comments: 3 pages, 2 figures, to appear in IEEE International Conference on Data Mining PhD forum conference proceedings
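The multislice modularity being optimized here is, up to notational conventions, the generalization introduced by Mucha et al. (Science, 2010):

$$ Q \;=\; \frac{1}{2\mu}\sum_{ijsr}\Big[\Big(A_{ijs} - \gamma_s \frac{k_{is}k_{js}}{2m_s}\Big)\delta_{sr} + \delta_{ij}\,C_{jsr}\Big]\,\delta(g_{is}, g_{jr}), $$

where $A_{ijs}$ is the adjacency within slice $s$, $k_{is}$ and $2m_s$ are node strengths and total edge weight within slice $s$, $\gamma_s$ is a per-slice resolution parameter, $C_{jsr}$ couples the copies of node $j$ in slices $s$ and $r$, and $2\mu$ normalizes by the total edge weight.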
arXiv:1206.4969 [pdf, other] (https://arxiv.org/abs/1206.4969)
Subjects: stat.AP; cs.SI; physics.soc-ph
Title: Community detection using spectral clustering on sparse geosocial data
Authors: Yves van Gennip, Blake Hunter, Raymond Ahn, Peter Elliott, Kyle Luh, Megan Halvorson, Shannon Reid, Matt Valasik, James Wo, George E. Tita, Andrea L. Bertozzi, P. Jeffrey Brantingham
Abstract: In this article we identify social communities among gang members in the Hollenbeck policing district in Los Angeles, based on sparse observations of a combination of social interactions and geographic locations of the individuals. This information, coming from LAPD Field Interview cards, is used to construct a similarity graph for the individuals. We use spectral clustering to identify clusters in the graph, corresponding to communities in Hollenbeck, and compare these with the LAPD's knowledge of the individuals' gang membership. We discuss different ways of encoding the geosocial information using a graph structure and the influence on the resulting clusterings. Finally we analyze the robustness of this technique with respect to noisy and incomplete data, thereby providing suggestions about the relative importance of quantity versus quality of collected data.
Submitted 8 November, 2012; v1 submitted 21 June, 2012; originally announced June 2012.
Comments: 22 pages, 6 figures (with subfigures)
MSC Class: 62H30; 91C20; 91D30; 94C15
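A minimal sketch of spectral clustering on a precomputed similarity graph of the kind described in this entry; the similarity matrix below is random stand-in data, not the Field Interview card graph, and the cluster count is arbitrary:

```python
import numpy as np
from sklearn.cluster import SpectralClustering

# Symmetric, non-negative similarity matrix standing in for the geosocial graph.
rng = np.random.default_rng(0)
S = rng.random((60, 60))
W = (S + S.T) / 2
np.fill_diagonal(W, 0.0)

labels = SpectralClustering(
    n_clusters=4, affinity="precomputed", random_state=0
).fit_predict(W)
print(np.bincount(labels))   # community sizes
```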