CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;25 of 25 results for author: <span class="mathjax">Bruce, N</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Bruce%2C+N">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Bruce, N"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Bruce%2C+N&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Bruce, N"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.09725">arXiv:2503.09725</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.09725">pdf</a>, <a href="https://arxiv.org/format/2503.09725">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Leveraging Social Media and Google Trends to Identify Waves of Avian Influenza Outbreaks in USA and Canada </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Soltani%2C+M">Marzieh Soltani</a>, <a href="/search/cs?searchtype=author&amp;query=Dara%2C+R">Rozita Dara</a>, <a href="/search/cs?searchtype=author&amp;query=Poljak%2C+Z">Zvonimir Poljak</a>, <a href="/search/cs?searchtype=author&amp;query=Dub%C3%A9%2C+C">Caroline Dub茅</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N">Neil Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Sharif%2C+S">Shayan Sharif</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.09725v1-abstract-short" style="display: inline;"> Avian Influenza Virus (AIV) poses significant threats to the poultry industry, humans, domestic animals, and wildlife health worldwide. Monitoring this infectious disease is important for rapid and effective response to potential outbreaks. Conventional avian influenza surveillance systems have exhibited limitations in providing timely alerts for potential outbreaks. This study aimed to examine th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.09725v1-abstract-full').style.display = 'inline'; document.getElementById('2503.09725v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.09725v1-abstract-full" style="display: none;"> Avian Influenza Virus (AIV) poses significant threats to the poultry industry, humans, domestic animals, and wildlife health worldwide. Monitoring this infectious disease is important for rapid and effective response to potential outbreaks. Conventional avian influenza surveillance systems have exhibited limitations in providing timely alerts for potential outbreaks. This study aimed to examine the idea of using online activity on social media, and Google searches to improve the identification of AIV in the early stage of an outbreak in a region. To this end, to evaluate the feasibility of this approach, we collected historical data on online user activities from X (formerly known as Twitter) and Google Trends and assessed the statistical correlation of activities in a region with the AIV outbreak officially reported case numbers. In order to mitigate the effect of the noisy content on the outbreak identification process, large language models were utilized to filter out the relevant online activity on X that could be indicative of an outbreak. Additionally, we conducted trend analysis on the selected internet-based data sources in terms of their timeliness and statistical significance in identifying AIV outbreaks. Moreover, we performed an ablation study using autoregressive forecasting models to identify the contribution of X and Google Trends in predicting AIV outbreaks. The experimental findings illustrate that online activity on social media and search engine trends can detect avian influenza outbreaks, providing alerts earlier compared to official reports. This study suggests that real-time analysis of social media outlets and Google search trends can be used in avian influenza outbreak early warning systems, supporting epidemiologists and animal health professionals in informed decision-making. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.09725v1-abstract-full').style.display = 'none'; document.getElementById('2503.09725v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 9 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.13510">arXiv:2402.13510</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.13510">pdf</a>, <a href="https://arxiv.org/format/2402.13510">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SealD-NeRF: Interactive Pixel-Level Editing for Dynamic Scenes by Neural Radiance Fields </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Huang%2C+Z">Zhentao Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Y">Yukun Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N">Neil Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Gong%2C+M">Minglun Gong</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.13510v1-abstract-short" style="display: inline;"> The widespread adoption of implicit neural representations, especially Neural Radiance Fields (NeRF), highlights a growing need for editing capabilities in implicit 3D models, essential for tasks like scene post-processing and 3D content creation. Despite previous efforts in NeRF editing, challenges remain due to limitations in editing flexibility and quality. The key issue is developing a neural&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13510v1-abstract-full').style.display = 'inline'; document.getElementById('2402.13510v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.13510v1-abstract-full" style="display: none;"> The widespread adoption of implicit neural representations, especially Neural Radiance Fields (NeRF), highlights a growing need for editing capabilities in implicit 3D models, essential for tasks like scene post-processing and 3D content creation. Despite previous efforts in NeRF editing, challenges remain due to limitations in editing flexibility and quality. The key issue is developing a neural representation that supports local edits for real-time updates. Current NeRF editing methods, offering pixel-level adjustments or detailed geometry and color modifications, are mostly limited to static scenes. This paper introduces SealD-NeRF, an extension of Seal-3D for pixel-level editing in dynamic settings, specifically targeting the D-NeRF network. It allows for consistent edits across sequences by mapping editing actions to a specific timeframe, freezing the deformation network responsible for dynamic scene representation, and using a teacher-student approach to integrate changes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.13510v1-abstract-full').style.display = 'none'; document.getElementById('2402.13510v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T45 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.01783">arXiv:2211.01783</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.01783">pdf</a>, <a href="https://arxiv.org/format/2211.01783">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Quantifying and Learning Static vs. Dynamic Information in Deep Spatiotemporal Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Siam%2C+M">Mennatullah Siam</a>, <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Wildes%2C+R+P">Richard P. Wildes</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.01783v2-abstract-short" style="display: inline;"> There is limited understanding of the information captured by deep spatiotemporal models in their intermediate representations. For example, while evidence suggests that action recognition algorithms are heavily influenced by visual appearance in single frames, no quantitative methodology exists for evaluating such static bias in the latent representation compared to bias toward dynamics. We tackl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.01783v2-abstract-full').style.display = 'inline'; document.getElementById('2211.01783v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.01783v2-abstract-full" style="display: none;"> There is limited understanding of the information captured by deep spatiotemporal models in their intermediate representations. For example, while evidence suggests that action recognition algorithms are heavily influenced by visual appearance in single frames, no quantitative methodology exists for evaluating such static bias in the latent representation compared to bias toward dynamics. We tackle this challenge by proposing an approach for quantifying the static and dynamic biases of any spatiotemporal model, and apply our approach to three tasks, action recognition, automatic video object segmentation (AVOS) and video instance segmentation (VIS). Our key findings are: (i) Most examined models are biased toward static information. (ii) Some datasets that are assumed to be biased toward dynamics are actually biased toward static information. (iii) Individual channels in an architecture can be biased toward static, dynamic or a combination of the two. (iv) Most models converge to their culminating biases in the first half of training. We then explore how these biases affect performance on dynamically biased datasets. For action recognition, we propose StaticDropout, a semantically guided dropout that debiases a model from static information toward dynamics. For AVOS, we design a better combination of fusion and cross connection layers compared with previous architectures. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.01783v2-abstract-full').style.display = 'none'; document.getElementById('2211.01783v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">TPAMI 2024. arXiv admin note: substantial text overlap with arXiv:2206.02846</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.02846">arXiv:2206.02846</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2206.02846">pdf</a>, <a href="https://arxiv.org/format/2206.02846">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> A Deeper Dive Into What Deep Spatiotemporal Networks Encode: Quantifying Static vs. Dynamic Information </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Siam%2C+M">Mennatullah Siam</a>, <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Wildes%2C+R+P">Richard P. Wildes</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.02846v1-abstract-short" style="display: inline;"> Deep spatiotemporal models are used in a variety of computer vision tasks, such as action recognition and video object segmentation. Currently, there is a limited understanding of what information is captured by these models in their intermediate representations. For example, while it has been observed that action recognition algorithms are heavily influenced by visual appearance in single static&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.02846v1-abstract-full').style.display = 'inline'; document.getElementById('2206.02846v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.02846v1-abstract-full" style="display: none;"> Deep spatiotemporal models are used in a variety of computer vision tasks, such as action recognition and video object segmentation. Currently, there is a limited understanding of what information is captured by these models in their intermediate representations. For example, while it has been observed that action recognition algorithms are heavily influenced by visual appearance in single static frames, there is no quantitative methodology for evaluating such static bias in the latent representation compared to bias toward dynamic information (e.g. motion). We tackle this challenge by proposing a novel approach for quantifying the static and dynamic biases of any spatiotemporal model. To show the efficacy of our approach, we analyse two widely studied tasks, action recognition and video object segmentation. Our key findings are threefold: (i) Most examined spatiotemporal models are biased toward static information; although, certain two-stream architectures with cross-connections show a better balance between the static and dynamic information captured. (ii) Some datasets that are commonly assumed to be biased toward dynamics are actually biased toward static information. (iii) Individual units (channels) in an architecture can be biased toward static, dynamic or a combination of the two. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.02846v1-abstract-full').style.display = 'none'; document.getElementById('2206.02846v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPR 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.10335">arXiv:2110.10335</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.10335">pdf</a>, <a href="https://arxiv.org/format/2110.10335">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Simpler Does It: Generating Semantic Labels with Objectness Guidance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.10335v1-abstract-short" style="display: inline;"> Existing weakly or semi-supervised semantic segmentation methods utilize image or box-level supervision to generate pseudo-labels for weakly labeled images. However, due to the lack of strong supervision, the generated pseudo-labels are often noisy near the object boundaries, which severely impacts the network&#39;s ability to learn strong representations. To address this problem, we present a novel f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10335v1-abstract-full').style.display = 'inline'; document.getElementById('2110.10335v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.10335v1-abstract-full" style="display: none;"> Existing weakly or semi-supervised semantic segmentation methods utilize image or box-level supervision to generate pseudo-labels for weakly labeled images. However, due to the lack of strong supervision, the generated pseudo-labels are often noisy near the object boundaries, which severely impacts the network&#39;s ability to learn strong representations. To address this problem, we present a novel framework that generates pseudo-labels for training images, which are then used to train a segmentation model. To generate pseudo-labels, we combine information from: (i) a class agnostic objectness network that learns to recognize object-like regions, and (ii) either image-level or bounding box annotations. We show the efficacy of our approach by demonstrating how the objectness network can naturally be leveraged to generate object-like regions for unseen categories. We then propose an end-to-end multi-task learning strategy, that jointly learns to segment semantics and objectness using the generated pseudo-labels. Extensive experiments demonstrate the high quality of our generated pseudo-labels and effectiveness of the proposed framework in a variety of domains. Our approach achieves better or competitive performance compared to existing weakly-supervised and semi-supervised methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10335v1-abstract-full').style.display = 'none'; document.getElementById('2110.10335v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">BMVC 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.09929">arXiv:2108.09929</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2108.09929">pdf</a>, <a href="https://arxiv.org/format/2108.09929">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SegMix: Co-occurrence Driven Mixup for Semantic Segmentation and Adversarial Robustness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.09929v1-abstract-short" style="display: inline;"> In this paper, we present a strategy for training convolutional neural networks to effectively resolve interference arising from competing hypotheses relating to inter-categorical information throughout the network. The premise is based on the notion of feature binding, which is defined as the process by which activations spread across space and layers in the network are successfully integrated to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.09929v1-abstract-full').style.display = 'inline'; document.getElementById('2108.09929v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.09929v1-abstract-full" style="display: none;"> In this paper, we present a strategy for training convolutional neural networks to effectively resolve interference arising from competing hypotheses relating to inter-categorical information throughout the network. The premise is based on the notion of feature binding, which is defined as the process by which activations spread across space and layers in the network are successfully integrated to arrive at a correct inference decision. In our work, this is accomplished for the task of dense image labelling by blending images based on (i) categorical clustering or (ii) the co-occurrence likelihood of categories. We then train a feature binding network which simultaneously segments and separates the blended images. Subsequent feature denoising to suppress noisy activations reveals additional desirable properties and high degrees of successful predictions. Through this process, we reveal a general mechanism, distinct from any prior methods, for boosting the performance of the base segmentation and saliency network while simultaneously increasing robustness to adversarial attacks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.09929v1-abstract-full').style.display = 'none'; document.getElementById('2108.09929v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Under submission at IJCV (BMVC 2020 Extension). arXiv admin note: substantial text overlap with arXiv:2008.05667</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.07884">arXiv:2108.07884</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2108.07884">pdf</a>, <a href="https://arxiv.org/format/2108.07884">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Global Pooling, More than Meets the Eye: Position Information is Encoded Channel-Wise in CNNs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.07884v1-abstract-short" style="display: inline;"> In this paper, we challenge the common assumption that collapsing the spatial dimensions of a 3D (spatial-channel) tensor in a convolutional neural network (CNN) into a vector via global pooling removes all spatial information. Specifically, we demonstrate that positional information is encoded based on the ordering of the channel dimensions, while semantic information is largely not. Following th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07884v1-abstract-full').style.display = 'inline'; document.getElementById('2108.07884v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.07884v1-abstract-full" style="display: none;"> In this paper, we challenge the common assumption that collapsing the spatial dimensions of a 3D (spatial-channel) tensor in a convolutional neural network (CNN) into a vector via global pooling removes all spatial information. Specifically, we demonstrate that positional information is encoded based on the ordering of the channel dimensions, while semantic information is largely not. Following this demonstration, we show the real world impact of these findings by applying them to two applications. First, we propose a simple yet effective data augmentation strategy and loss function which improves the translation invariance of a CNN&#39;s output. Second, we propose a method to efficiently determine which channels in the latent representation are responsible for (i) encoding overall position information or (ii) region-specific positions. We first show that semantic segmentation has a significant reliance on the overall position channels to make predictions. We then show for the first time that it is possible to perform a `region-specific&#39; attack, and degrade a network&#39;s performance in a particular part of the input. We believe our findings and demonstrated applications will benefit research areas concerned with understanding the characteristics of CNNs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07884v1-abstract-full').style.display = 'none'; document.getElementById('2108.07884v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICCV 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.12322">arXiv:2101.12322</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.12322">pdf</a>, <a href="https://arxiv.org/format/2101.12322">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Position, Padding and Predictions: A Deeper Look at Position Information in CNNs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.12322v1-abstract-short" style="display: inline;"> In contrast to fully connected networks, Convolutional Neural Networks (CNNs) achieve efficiency by learning weights associated with local filters with a finite spatial extent. An implication of this is that a filter may know what it is looking at, but not where it is positioned in the image. In this paper, we first test this hypothesis and reveal that a surprising degree of absolute position info&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.12322v1-abstract-full').style.display = 'inline'; document.getElementById('2101.12322v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.12322v1-abstract-full" style="display: none;"> In contrast to fully connected networks, Convolutional Neural Networks (CNNs) achieve efficiency by learning weights associated with local filters with a finite spatial extent. An implication of this is that a filter may know what it is looking at, but not where it is positioned in the image. In this paper, we first test this hypothesis and reveal that a surprising degree of absolute position information is encoded in commonly used CNNs. We show that zero padding drives CNNs to encode position information in their internal representations, while a lack of padding precludes position encoding. This gives rise to deeper questions about the role of position information in CNNs: (i) What boundary heuristics enable optimal position encoding for downstream tasks?; (ii) Does position encoding affect the learning of semantic representations?; (iii) Does position encoding always improve performance? To provide answers, we perform the largest case study to date on the role that padding and border heuristics play in CNNs. We design novel tasks which allow us to quantify boundary effects as a function of the distance to the border. Numerous semantic objectives reveal the effect of the border on semantic representations. Finally, we demonstrate the implications of these findings on multiple real-world tasks to show that position information can both help or hurt performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.12322v1-abstract-full').style.display = 'none'; document.getElementById('2101.12322v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.11604">arXiv:2101.11604</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.11604">pdf</a>, <a href="https://arxiv.org/format/2101.11604">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Shape or Texture: Understanding Discriminative Features in CNNs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Esser%2C+P">Patrick Esser</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Ommer%2C+B">Bjorn Ommer</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N">Neil Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.11604v1-abstract-short" style="display: inline;"> Contrasting the previous evidence that neurons in the later layers of a Convolutional Neural Network (CNN) respond to complex object shapes, recent studies have shown that CNNs actually exhibit a `texture bias&#39;: given an image with both texture and shape cues (e.g., a stylized image), a CNN is biased towards predicting the category corresponding to the texture. However, these previous studies cond&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.11604v1-abstract-full').style.display = 'inline'; document.getElementById('2101.11604v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.11604v1-abstract-full" style="display: none;"> Contrasting the previous evidence that neurons in the later layers of a Convolutional Neural Network (CNN) respond to complex object shapes, recent studies have shown that CNNs actually exhibit a `texture bias&#39;: given an image with both texture and shape cues (e.g., a stylized image), a CNN is biased towards predicting the category corresponding to the texture. However, these previous studies conduct experiments on the final classification output of the network, and fail to robustly evaluate the bias contained (i) in the latent representations, and (ii) on a per-pixel level. In this paper, we design a series of experiments that overcome these issues. We do this with the goal of better understanding what type of shape information contained in the network is discriminative, where shape information is encoded, as well as when the network learns about object shape during training. We show that a network learns the majority of overall shape information at the first few epochs of training and that this information is largely encoded in the last few layers of a CNN. Finally, we show that the encoding of shape does not imply the encoding of localized per-pixel semantic information. The experimental results and findings provide a more accurate understanding of the behaviour of current CNNs, thus helping to inform future design choices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.11604v1-abstract-full').style.display = 'none'; document.getElementById('2101.11604v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ICLR 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.05667">arXiv:2008.05667</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2008.05667">pdf</a>, <a href="https://arxiv.org/format/2008.05667">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Feature Binding with Category-Dependant MixUp for Semantic Segmentation and Adversarial Robustness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kowal%2C+M">Matthew Kowal</a>, <a href="/search/cs?searchtype=author&amp;query=Derpanis%2C+K+G">Konstantinos G. Derpanis</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.05667v1-abstract-short" style="display: inline;"> In this paper, we present a strategy for training convolutional neural networks to effectively resolve interference arising from competing hypotheses relating to inter-categorical information throughout the network. The premise is based on the notion of feature binding, which is defined as the process by which activation&#39;s spread across space and layers in the network are successfully integrated t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.05667v1-abstract-full').style.display = 'inline'; document.getElementById('2008.05667v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.05667v1-abstract-full" style="display: none;"> In this paper, we present a strategy for training convolutional neural networks to effectively resolve interference arising from competing hypotheses relating to inter-categorical information throughout the network. The premise is based on the notion of feature binding, which is defined as the process by which activation&#39;s spread across space and layers in the network are successfully integrated to arrive at a correct inference decision. In our work, this is accomplished for the task of dense image labelling by blending images based on their class labels, and then training a feature binding network, which simultaneously segments and separates the blended images. Subsequent feature denoising to suppress noisy activations reveals additional desirable properties and high degrees of successful predictions. Through this process, we reveal a general mechanism, distinct from any prior methods, for boosting the performance of the base segmentation network while simultaneously increasing robustness to adversarial attacks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.05667v1-abstract-full').style.display = 'none'; document.getElementById('2008.05667v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to BMVC 2020 (Oral)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.10540">arXiv:2002.10540</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2002.10540">pdf</a>, <a href="https://arxiv.org/format/2002.10540">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Revisiting Saliency Metrics: Farthest-Neighbor Area Under Curve </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.10540v1-abstract-short" style="display: inline;"> Saliency detection has been widely studied because it plays an important role in various vision applications, but it is difficult to evaluate saliency systems because each measure has its own bias. In this paper, we first revisit the problem of applying the widely used saliency metrics on modern Convolutional Neural Networks(CNNs). Our investigation shows the saliency datasets have been built base&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.10540v1-abstract-full').style.display = 'inline'; document.getElementById('2002.10540v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.10540v1-abstract-full" style="display: none;"> Saliency detection has been widely studied because it plays an important role in various vision applications, but it is difficult to evaluate saliency systems because each measure has its own bias. In this paper, we first revisit the problem of applying the widely used saliency metrics on modern Convolutional Neural Networks(CNNs). Our investigation shows the saliency datasets have been built based on different choices of parameters and CNNs are designed to fit a dataset-specific distribution. Secondly, we show that the Shuffled Area Under Curve(S-AUC) metric still suffers from spatial biases. We propose a new saliency metric based on the AUC property, which aims at sampling a more directional negative set for evaluation, denoted as Farthest-Neighbor AUC(FN-AUC). We also propose a strategy to measure the quality of the sampled negative set. Our experiment shows FN-AUC can measure spatial biases, central and peripheral, more effectively than S-AUC without penalizing the fixation locations. Thirdly, we propose a global smoothing function to overcome the problem of few value degrees (output quantization) in computing AUC metrics. Comparing with random noise, our smooth function can create unique values without losing the relative saliency relationship. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.10540v1-abstract-full').style.display = 'none'; document.getElementById('2002.10540v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to CVPR 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2001.08248">arXiv:2001.08248</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2001.08248">pdf</a>, <a href="https://arxiv.org/format/2001.08248">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> How Much Position Information Do Convolutional Neural Networks Encode? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2001.08248v1-abstract-short" style="display: inline;"> In contrast to fully connected networks, Convolutional Neural Networks (CNNs) achieve efficiency by learning weights associated with local filters with a finite spatial extent. An implication of this is that a filter may know what it is looking at, but not where it is positioned in the image. Information concerning absolute position is inherently useful, and it is reasonable to assume that deep CN&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.08248v1-abstract-full').style.display = 'inline'; document.getElementById('2001.08248v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2001.08248v1-abstract-full" style="display: none;"> In contrast to fully connected networks, Convolutional Neural Networks (CNNs) achieve efficiency by learning weights associated with local filters with a finite spatial extent. An implication of this is that a filter may know what it is looking at, but not where it is positioned in the image. Information concerning absolute position is inherently useful, and it is reasonable to assume that deep CNNs may implicitly learn to encode this information if there is a means to do so. In this paper, we test this hypothesis revealing the surprising degree of absolute position information that is encoded in commonly used neural networks. A comprehensive set of experiments show the validity of this hypothesis and shed light on how and where this information is represented while offering clues to where positional information is derived from in deep CNNs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2001.08248v1-abstract-full').style.display = 'none'; document.getElementById('2001.08248v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 January, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ICLR 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.12996">arXiv:1909.12996</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1909.12996">pdf</a>, <a href="https://arxiv.org/format/1909.12996">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Distributed Iterative Gating Networks for Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Karim%2C+R">Rezaul Karim</a>, <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.12996v1-abstract-short" style="display: inline;"> In this paper, we present a canonical structure for controlling information flow in neural networks with an efficient feedback routing mechanism based on a strategy of Distributed Iterative Gating (DIGNet). The structure of this mechanism derives from a strong conceptual foundation and presents a light-weight mechanism for adaptive control of computation similar to recurrent convolutional neural n&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.12996v1-abstract-full').style.display = 'inline'; document.getElementById('1909.12996v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.12996v1-abstract-full" style="display: none;"> In this paper, we present a canonical structure for controlling information flow in neural networks with an efficient feedback routing mechanism based on a strategy of Distributed Iterative Gating (DIGNet). The structure of this mechanism derives from a strong conceptual foundation and presents a light-weight mechanism for adaptive control of computation similar to recurrent convolutional neural networks by integrating feedback signals with a feed-forward architecture. In contrast to other RNN formulations, DIGNet generates feedback signals in a cascaded manner that implicitly carries information from all the layers above. This cascaded feedback propagation by means of the propagator gates is found to be more effective compared to other feedback mechanisms that use feedback from the output of either the corresponding stage or from the previous stage. Experiments reveal the high degree of capability that this recurrent approach with cascaded feedback presents over feed-forward baselines and other recurrent models for pixel-wise labeling problems on three challenging datasets, PASCAL VOC 2012, COCO-Stuff, and ADE20K. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.12996v1-abstract-full').style.display = 'none'; document.getElementById('1909.12996v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">WACV 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1902.07276">arXiv:1902.07276</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1902.07276">pdf</a>, <a href="https://arxiv.org/format/1902.07276">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applications">stat.AP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Accuracy of the Epic Sepsis Prediction Model in a Regional Health System </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bennett%2C+T">Tellen Bennett</a>, <a href="/search/cs?searchtype=author&amp;query=Russell%2C+S">Seth Russell</a>, <a href="/search/cs?searchtype=author&amp;query=King%2C+J">James King</a>, <a href="/search/cs?searchtype=author&amp;query=Schilling%2C+L">Lisa Schilling</a>, <a href="/search/cs?searchtype=author&amp;query=Voong%2C+C">Chan Voong</a>, <a href="/search/cs?searchtype=author&amp;query=Rogers%2C+N">Nancy Rogers</a>, <a href="/search/cs?searchtype=author&amp;query=Adrian%2C+B">Bonnie Adrian</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N">Nicholas Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Ghosh%2C+D">Debashis Ghosh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1902.07276v1-abstract-short" style="display: inline;"> Interest in an electronic health record-based computational model that can accurately predict a patient&#39;s risk of sepsis at a given point in time has grown rapidly in the last several years. Like other EHR vendors, the Epic Systems Corporation has developed a proprietary sepsis prediction model (ESPM). Epic developed the model using data from three health systems and penalized logistic regression.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.07276v1-abstract-full').style.display = 'inline'; document.getElementById('1902.07276v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1902.07276v1-abstract-full" style="display: none;"> Interest in an electronic health record-based computational model that can accurately predict a patient&#39;s risk of sepsis at a given point in time has grown rapidly in the last several years. Like other EHR vendors, the Epic Systems Corporation has developed a proprietary sepsis prediction model (ESPM). Epic developed the model using data from three health systems and penalized logistic regression. Demographic, comorbidity, vital sign, laboratory, medication, and procedural variables contribute to the model. The objective of this project was to compare the predictive performance of the ESPM with a regional health system&#39;s current Early Warning Score-based sepsis detection program. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.07276v1-abstract-full').style.display = 'none'; document.getElementById('1902.07276v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 February, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at AMIA Symposium 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.02425">arXiv:1901.02425</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1901.02425">pdf</a>, <a href="https://arxiv.org/format/1901.02425">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Richer and Deeper Supervision Network for Salient Object Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1901.02425v1-abstract-short" style="display: inline;"> Recent Salient Object Detection (SOD) systems are mostly based on Convolutional Neural Networks (CNNs). Specifically, Deeply Supervised Saliency (DSS) system has shown it is very useful to add short connections to the network and supervising on the side output. In this work, we propose a new SOD system which aims at designing a more efficient and effective way to pass back global information. Rich&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.02425v1-abstract-full').style.display = 'inline'; document.getElementById('1901.02425v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1901.02425v1-abstract-full" style="display: none;"> Recent Salient Object Detection (SOD) systems are mostly based on Convolutional Neural Networks (CNNs). Specifically, Deeply Supervised Saliency (DSS) system has shown it is very useful to add short connections to the network and supervising on the side output. In this work, we propose a new SOD system which aims at designing a more efficient and effective way to pass back global information. Richer and Deeper Supervision (RDS) is applied to better combine features from each side output without demanding much extra computational space. Meanwhile, the backbone network used for SOD is normally pre-trained on the object classification dataset, ImageNet. But the pre-trained model has been trained on cropped images in order to only focus on distinguishing features within the region of the object. But the ignored background information is also significant in the task of SOD. We try to solve this problem by introducing the training data designed for object detection. A coarse global information is learned based on an entire image with its bounding box before training on the SOD dataset. The large-scale of object images can slightly improve the performance of SOD. Our experiment shows the proposed RDS network achieves the state-of-the-art results on five public SOD datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.02425v1-abstract-full').style.display = 'none'; document.getElementById('1901.02425v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1812.08848">arXiv:1812.08848</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1812.08848">pdf</a>, <a href="https://arxiv.org/format/1812.08848">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SMILER: Saliency Model Implementation Library for Experimental Research </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wloka%2C+C">Calden Wloka</a>, <a href="/search/cs?searchtype=author&amp;query=Kuni%C4%87%2C+T">Toni Kuni膰</a>, <a href="/search/cs?searchtype=author&amp;query=Kotseruba%2C+I">Iuliia Kotseruba</a>, <a href="/search/cs?searchtype=author&amp;query=Fahimi%2C+R">Ramin Fahimi</a>, <a href="/search/cs?searchtype=author&amp;query=Frosst%2C+N">Nicholas Frosst</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Tsotsos%2C+J+K">John K. Tsotsos</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1812.08848v1-abstract-short" style="display: inline;"> The Saliency Model Implementation Library for Experimental Research (SMILER) is a new software package which provides an open, standardized, and extensible framework for maintaining and executing computational saliency models. This work drastically reduces the human effort required to apply saliency algorithms to new tasks and datasets, while also ensuring consistency and procedural correctness fo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.08848v1-abstract-full').style.display = 'inline'; document.getElementById('1812.08848v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1812.08848v1-abstract-full" style="display: none;"> The Saliency Model Implementation Library for Experimental Research (SMILER) is a new software package which provides an open, standardized, and extensible framework for maintaining and executing computational saliency models. This work drastically reduces the human effort required to apply saliency algorithms to new tasks and datasets, while also ensuring consistency and procedural correctness for results and conclusions produced by different parties. At its launch SMILER already includes twenty three saliency models (fourteen models based in MATLAB and nine supported through containerization), and the open design of SMILER encourages this number to grow with future contributions from the community. The project may be downloaded and contributed to through its GitHub page: https://github.com/tsotsoslab/smiler <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1812.08848v1-abstract-full').style.display = 'none'; document.getElementById('1812.08848v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 December, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.08043">arXiv:1811.08043</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1811.08043">pdf</a>, <a href="https://arxiv.org/format/1811.08043">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Recurrent Iterative Gating Networks for Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Karim%2C+R">Rezaul Karim</a>, <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.08043v1-abstract-short" style="display: inline;"> In this paper, we present an approach for Recurrent Iterative Gating called RIGNet. The core elements of RIGNet involve recurrent connections that control the flow of information in neural networks in a top-down manner, and different variants on the core structure are considered. The iterative nature of this mechanism allows for gating to spread in both spatial extent and feature space. This is re&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08043v1-abstract-full').style.display = 'inline'; document.getElementById('1811.08043v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.08043v1-abstract-full" style="display: none;"> In this paper, we present an approach for Recurrent Iterative Gating called RIGNet. The core elements of RIGNet involve recurrent connections that control the flow of information in neural networks in a top-down manner, and different variants on the core structure are considered. The iterative nature of this mechanism allows for gating to spread in both spatial extent and feature space. This is revealed to be a powerful mechanism with broad compatibility with common existing networks. Analysis shows how gating interacts with different network characteristics, and we also show that more shallow networks with gating may be made to perform better than much deeper networks that do not include RIGNet modules. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08043v1-abstract-full').style.display = 'none'; document.getElementById('1811.08043v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">WACV 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.02426">arXiv:1810.02426</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1810.02426">pdf</a>, <a href="https://arxiv.org/format/1810.02426">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Relative Saliency and Ranking: Models, Metrics, Data, and Benchmarks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kalash%2C+M">Mahmoud Kalash</a>, <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1810.02426v2-abstract-short" style="display: inline;"> Salient object detection is a problem that has been considered in detail and \textcolor{black}{many solutions have been proposed}. In this paper, we argue that work to date has addressed a problem that is relatively ill-posed. Specifically, there is not universal agreement about what constitutes a salient object when multiple observers are queried. This implies that some objects are more likely to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.02426v2-abstract-full').style.display = 'inline'; document.getElementById('1810.02426v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.02426v2-abstract-full" style="display: none;"> Salient object detection is a problem that has been considered in detail and \textcolor{black}{many solutions have been proposed}. In this paper, we argue that work to date has addressed a problem that is relatively ill-posed. Specifically, there is not universal agreement about what constitutes a salient object when multiple observers are queried. This implies that some objects are more likely to be judged salient than others, and implies a relative rank exists on salient objects. Initially, we present a novel deep learning solution based on a hierarchical representation of relative saliency and stage-wise refinement. Further to this, we present data, analysis and baseline benchmark results towards addressing the problem of salient object ranking. Methods for deriving suitable ranked salient object instances are presented, along with metrics suitable to measuring algorithm performance. In addition, we show how a derived dataset can be successively refined to provide cleaned results that correlate well with pristine ground truth in its characteristics and value for training and testing models. Finally, we provide a comparison among prevailing algorithms that address salient object ranking or detection to establish initial baselines providing a basis for comparison with future efforts addressing this problem. \textcolor{black}{The source code and data are publicly available via our project page:} \textrm{\href{https://ryersonvisionlab.github.io/cocosalrank.html}{ryersonvisionlab.github.io/cocosalrank}} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.02426v2-abstract-full').style.display = 'none'; document.getElementById('1810.02426v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to Transaction on Pattern Analysis and Machine Intelligence. arXiv admin note: substantial text overlap with arXiv:1803.05082</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1807.09430">arXiv:1807.09430</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1807.09430">pdf</a>, <a href="https://arxiv.org/format/1807.09430">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Semantics Meet Saliency: Exploring Domain Affinity and Models for Dual-Task Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kalash%2C+M">Mahmoud Kalash</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1807.09430v1-abstract-short" style="display: inline;"> Much research has examined models for prediction of semantic labels or instances including dense pixel-wise prediction. The problem of predicting salient objects or regions of an image has also been examined in a similar light. With that said, there is an apparent relationship between these two problem domains in that the composition of a scene and associated semantic categories is certain to play&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.09430v1-abstract-full').style.display = 'inline'; document.getElementById('1807.09430v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1807.09430v1-abstract-full" style="display: none;"> Much research has examined models for prediction of semantic labels or instances including dense pixel-wise prediction. The problem of predicting salient objects or regions of an image has also been examined in a similar light. With that said, there is an apparent relationship between these two problem domains in that the composition of a scene and associated semantic categories is certain to play into what is deemed salient. In this paper, we explore the relationship between these two problem domains. This is carried out in constructing deep neural networks that perform both predictions together albeit with different configurations for flow of conceptual information related to each distinct problem. This is accompanied by a detailed analysis of object co-occurrences that shed light on dataset bias and semantic precedence specific to individual categories. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.09430v1-abstract-full').style.display = 'none'; document.getElementById('1807.09430v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 July, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">BMVC 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.11266">arXiv:1806.11266</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1806.11266">pdf</a>, <a href="https://arxiv.org/format/1806.11266">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Gated Feedback Refinement Network for Coarse-to-Fine Dense Semantic Image Labeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Rochan%2C+M">Mrigank Rochan</a>, <a href="/search/cs?searchtype=author&amp;query=Naha%2C+S">Shujon Naha</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yang Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.11266v1-abstract-short" style="display: inline;"> Effective integration of local and global contextual information is crucial for semantic segmentation and dense image labeling. We develop two encoder-decoder based deep learning architectures to address this problem. We first propose a network architecture called Label Refinement Network (LRN) that predicts segmentation labels in a coarse-to-fine fashion at several spatial resolutions. In this ne&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.11266v1-abstract-full').style.display = 'inline'; document.getElementById('1806.11266v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.11266v1-abstract-full" style="display: none;"> Effective integration of local and global contextual information is crucial for semantic segmentation and dense image labeling. We develop two encoder-decoder based deep learning architectures to address this problem. We first propose a network architecture called Label Refinement Network (LRN) that predicts segmentation labels in a coarse-to-fine fashion at several spatial resolutions. In this network, we also define loss functions at several stages to provide supervision at different stages of training. However, there are limits to the quality of refinement possible if ambiguous information is passed forward. In order to address this issue, we also propose Gated Feedback Refinement Network (G-FRNet) that addresses this limitation. Initially, G-FRNet makes a coarse-grained prediction which it progressively refines to recover details by effectively integrating local and global contextual information during the refinement stages. This is achieved by gate units proposed in this work, that control information passed forward in order to resolve the ambiguity. Experiments were conducted on four challenging dense labeling datasets (CamVid, PASCAL VOC 2012, Horse-Cow Parsing, PASCAL-Person-Part, and SUN-RGBD). G-FRNet achieves state-of-the-art semantic segmentation results on the CamVid and Horse-Cow Parsing datasets and produces results competitive with the best performing approaches that appear in the literature for the other three datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.11266v1-abstract-full').style.display = 'none'; document.getElementById('1806.11266v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.01047">arXiv:1805.01047</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1805.01047">pdf</a>, <a href="https://arxiv.org/format/1805.01047">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> EML-NET:An Expandable Multi-Layer NETwork for Saliency Prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jia%2C+S">Sen Jia</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1805.01047v2-abstract-short" style="display: inline;"> Saliency prediction can benefit from training that involves scene understanding that may be tangential to the central task; this may include understanding places, spatial layout, objects or involve different datasets and their bias. One can combine models, but to do this in a sophisticated manner can be complex, and also result in unwieldy networks or produce competing objectives that are hard to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.01047v2-abstract-full').style.display = 'inline'; document.getElementById('1805.01047v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.01047v2-abstract-full" style="display: none;"> Saliency prediction can benefit from training that involves scene understanding that may be tangential to the central task; this may include understanding places, spatial layout, objects or involve different datasets and their bias. One can combine models, but to do this in a sophisticated manner can be complex, and also result in unwieldy networks or produce competing objectives that are hard to balance. In this paper, we propose a scalable system to leverage multiple powerful deep CNN models to better extract visual features for saliency prediction. Our design differs from previous studies in that the whole system is trained in an almost end-to-end piece-wise fashion. The encoder and decoder components are separately trained to deal with complexity tied to the computational paradigm and required space. Furthermore, the encoder can contain more than one CNN model to extract features, and models can have different architectures or be pre-trained on different datasets. This parallel design yields a better computational paradigm overcoming limits to the variety of information or inference that can be combined at the encoder stage towards deeper networks and a more powerful encoding. Our network can be easily expanded almost without any additional cost, and other pre-trained CNN models can be incorporated availing a wider range of visual knowledge. We denote our expandable multi-layer network as EML-NET and our method achieves the state-of-the-art results on the public saliency benchmarks, SALICON, MIT300 and CAT2000. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.01047v2-abstract-full').style.display = 'none'; document.getElementById('1805.01047v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 March, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1803.05082">arXiv:1803.05082</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1803.05082">pdf</a>, <a href="https://arxiv.org/format/1803.05082">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Revisiting Salient Object Detection: Simultaneous Detection, Ranking, and Subitizing of Multiple Salient Objects </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Kalash%2C+M">Mahmoud Kalash</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N+D+B">Neil D. B. Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1803.05082v2-abstract-short" style="display: inline;"> Salient object detection is a problem that has been considered in detail and many solutions proposed. In this paper, we argue that work to date has addressed a problem that is relatively ill-posed. Specifically, there is not universal agreement about what constitutes a salient object when multiple observers are queried. This implies that some objects are more likely to be judged salient than other&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.05082v2-abstract-full').style.display = 'inline'; document.getElementById('1803.05082v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1803.05082v2-abstract-full" style="display: none;"> Salient object detection is a problem that has been considered in detail and many solutions proposed. In this paper, we argue that work to date has addressed a problem that is relatively ill-posed. Specifically, there is not universal agreement about what constitutes a salient object when multiple observers are queried. This implies that some objects are more likely to be judged salient than others, and implies a relative rank exists on salient objects. The solution presented in this paper solves this more general problem that considers relative rank, and we propose data and metrics suitable to measuring success in a relative object saliency landscape. A novel deep learning solution is proposed based on a hierarchical representation of relative saliency and stage-wise refinement. We also show that the problem of salient object subitizing can be addressed with the same network, and our approach exceeds performance of any prior work across all metrics considered (both traditional and newly proposed). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.05082v2-abstract-full').style.display = 'none'; document.getElementById('1803.05082v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 March, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 March, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in CVPR 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1703.00551">arXiv:1703.00551</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1703.00551">pdf</a>, <a href="https://arxiv.org/format/1703.00551">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Label Refinement Network for Coarse-to-Fine Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Islam%2C+M+A">Md Amirul Islam</a>, <a href="/search/cs?searchtype=author&amp;query=Naha%2C+S">Shujon Naha</a>, <a href="/search/cs?searchtype=author&amp;query=Rochan%2C+M">Mrigank Rochan</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N">Neil Bruce</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+Y">Yang Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1703.00551v1-abstract-short" style="display: inline;"> We consider the problem of semantic image segmentation using deep convolutional neural networks. We propose a novel network architecture called the label refinement network that predicts segmentation labels in a coarse-to-fine fashion at several resolutions. The segmentation labels at a coarse resolution are used together with convolutional features to obtain finer resolution segmentation labels.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.00551v1-abstract-full').style.display = 'inline'; document.getElementById('1703.00551v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1703.00551v1-abstract-full" style="display: none;"> We consider the problem of semantic image segmentation using deep convolutional neural networks. We propose a novel network architecture called the label refinement network that predicts segmentation labels in a coarse-to-fine fashion at several resolutions. The segmentation labels at a coarse resolution are used together with convolutional features to obtain finer resolution segmentation labels. We define loss functions at several stages in the network to provide supervisions at different stages. Our experimental results on several standard datasets demonstrate that the proposed model provides an effective way of producing pixel-wise dense image labeling. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1703.00551v1-abstract-full').style.display = 'none'; document.getElementById('1703.00551v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 March, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1702.06661">arXiv:1702.06661</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1702.06661">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Applications">stat.AP</span> </div> </div> <p class="title is-5 mathjax"> Social Learning and Diffusion of Pervasive Goods: An Empirical Study of an African App Store </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nia%2C+M+H">Meisam Hejazi Nia</a>, <a href="/search/cs?searchtype=author&amp;query=Ratchford%2C+B+T">Brian T. Ratchford</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N">Norris Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1702.06661v1-abstract-short" style="display: inline;"> In this study, the authors develop a structural model that combines a macro diffusion model with a micro choice model to control for the effect of social influence on the mobile app choices of customers over app stores. Social influence refers to the density of adopters within the proximity of other customers. Using a large data set from an African app store and Bayesian estimation methods, the au&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.06661v1-abstract-full').style.display = 'inline'; document.getElementById('1702.06661v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1702.06661v1-abstract-full" style="display: none;"> In this study, the authors develop a structural model that combines a macro diffusion model with a micro choice model to control for the effect of social influence on the mobile app choices of customers over app stores. Social influence refers to the density of adopters within the proximity of other customers. Using a large data set from an African app store and Bayesian estimation methods, the authors quantify the effect of social influence and investigate the impact of ignoring this process in estimating customer choices. The findings show that customer choices in the app store are explained better by offline than online density of adopters and that ignoring social influence in estimations results in biased estimates. Furthermore, the findings show that the mobile app adoption process is similar to adoption of music CDs, among all other classic economy goods. A counterfactual analysis shows that the app store can increase its revenue by 13.6% through a viral marketing policy (e.g., a sharing with friends and family button). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.06661v1-abstract-full').style.display = 'none'; document.getElementById('1702.06661v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1701.06425">arXiv:1701.06425</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1701.06425">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> The Joint Diffusion of a Digital Platform and its Complementary Goods: The Effects of Product Ratings and Observational Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nia%2C+M+H">Meisam Hejazi Nia</a>, <a href="/search/cs?searchtype=author&amp;query=Bruce%2C+N">Norris Bruce</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1701.06425v1-abstract-short" style="display: inline;"> The authors study the interdependent diffusion of an open source software (OSS) platform and its software complements. They quantify the role of OSS governance, quality signals such as product ratings, observational learning, and user actions upon adoption. To do so they extend the Bass Diffusion Model and apply it to a unique data set of 6 years of daily downloads of the Firefox browser and 52 of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1701.06425v1-abstract-full').style.display = 'inline'; document.getElementById('1701.06425v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1701.06425v1-abstract-full" style="display: none;"> The authors study the interdependent diffusion of an open source software (OSS) platform and its software complements. They quantify the role of OSS governance, quality signals such as product ratings, observational learning, and user actions upon adoption. To do so they extend the Bass Diffusion Model and apply it to a unique data set of 6 years of daily downloads of the Firefox browser and 52 of its add-ons. The study then re-casts the resulting differential equations into non-linear, discrete-time, state space forms; and estimate them using an MCMC approach to the Extended Kalman Filtern (EKF-MCMC). Unlike continuous-time filters, the EKF-MCMC approach avoids numerical integration, and so is more computational efficient, given the length of our time-series, high dimension of our state space and need to model heterogeneity. Results show, for example, that observational learning and add-on ratings increase the demand for Firefox add-ons; add-ons can increase the market potential of the Firefox platform; a slow add-on review process can diminish platform success; and OSS platforms (i.e. Chrome and Firefox) compete rather than complement each other. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1701.06425v1-abstract-full').style.display = 'none'; document.getElementById('1701.06425v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2017. </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10