CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;19 of 19 results for author: <span class="mathjax">Uhl, A</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Uhl%2C+A">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Uhl, A"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Uhl%2C+A&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Uhl, A"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.11741">arXiv:2501.11741</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.11741">pdf</a>, <a href="https://arxiv.org/format/2501.11741">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> FaceQSORT: a Multi-Face Tracking Method based on Biometric and Appearance Features </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=J%C3%B6chl%2C+R">Robert J枚chl</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.11741v2-abstract-short" style="display: inline;"> Tracking multiple faces is a difficult problem, as there may be partially occluded or lateral faces. In multiple face tracking, association is typically based on (biometric) face features. However, the models used to extract these face features usually require frontal face images, which can limit the tracking performance. In this work, a multi-face tracking method inspired by StrongSort, FaceQSORT&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.11741v2-abstract-full').style.display = 'inline'; document.getElementById('2501.11741v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.11741v2-abstract-full" style="display: none;"> Tracking multiple faces is a difficult problem, as there may be partially occluded or lateral faces. In multiple face tracking, association is typically based on (biometric) face features. However, the models used to extract these face features usually require frontal face images, which can limit the tracking performance. In this work, a multi-face tracking method inspired by StrongSort, FaceQSORT, is proposed. To mitigate the problem of partially occluded or lateral faces, biometric face features are combined with visual appearance features (i.e., generated by a generic object classifier), with both features are extracted from the same face patch. A comprehensive experimental evaluation is performed, including a comparison of different face descriptors, an evaluation of different parameter settings, and the application of a different similarity metric. All experiments are conducted with a new multi-face tracking dataset and a subset of the ChokePoint dataset. The `Paris Lodron University Salzburg Faces in a Queue&#39; dataset consists of a total of seven fully annotated sequences (12730 frames) and is made publicly available as part of this work. Together with this dataset, annotations of 6 sequences from the ChokePoint dataset are also provided. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.11741v2-abstract-full').style.display = 'none'; document.getElementById('2501.11741v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.11974">arXiv:2404.11974</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.11974">pdf</a>, <a href="https://arxiv.org/format/2404.11974">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Device (In)Dependence of Deep Learning-based Image Age Approximation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=J%C3%B6chl%2C+R">Robert J枚chl</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.11974v1-abstract-short" style="display: inline;"> The goal of temporal image forensic is to approximate the age of a digital image relative to images from the same device. Usually, this is based on traces left during the image acquisition pipeline. For example, several methods exist that exploit the presence of in-field sensor defects for this purpose. In addition to these &#39;classical&#39; methods, there is also an approach in which a Convolutional Ne&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.11974v1-abstract-full').style.display = 'inline'; document.getElementById('2404.11974v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.11974v1-abstract-full" style="display: none;"> The goal of temporal image forensic is to approximate the age of a digital image relative to images from the same device. Usually, this is based on traces left during the image acquisition pipeline. For example, several methods exist that exploit the presence of in-field sensor defects for this purpose. In addition to these &#39;classical&#39; methods, there is also an approach in which a Convolutional Neural Network (CNN) is trained to approximate the image age. One advantage of a CNN is that it independently learns the age features used. This would make it possible to exploit other (different) age traces in addition to the known ones (i.e., in-field sensor defects). In a previous work, we have shown that the presence of strong in-field sensor defects is irrelevant for a CNN to predict the age class. Based on this observation, the question arises how device (in)dependent the learned features are. In this work, we empirically asses this by training a network on images from a single device and then apply the trained model to images from different devices. This evaluation is performed on 14 different devices, including 10 devices from the publicly available &#39;Northumbria Temporal Image Forensics&#39; database. These 10 different devices are based on five different device pairs (i.e., with the identical camera model). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.11974v1-abstract-full').style.display = 'none'; document.getElementById('2404.11974v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work was accepted and presented in: 2022 ICPR-Workshop on Artificial Intelligence for Multimedia Forensics and Disinformation Detection. Montreal, Quebec, Canada. However, due to a technical issue on the publishing companies&#39; side, the work does not appear in the workshop proceedings</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.01241">arXiv:2311.01241</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.01241">pdf</a>, <a href="https://arxiv.org/format/2311.01241">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.23919/EUSIPCO.2017.8081595">10.23919/EUSIPCO.2017.8081595 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Exploring Deep Learning Image Super-Resolution for Iris Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ribeiro%2C+E">Eduardo Ribeiro</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Alonso-Fernandez%2C+F">Fernando Alonso-Fernandez</a>, <a href="/search/cs?searchtype=author&amp;query=Farrugia%2C+R+A">Reuben A. Farrugia</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.01241v1-abstract-short" style="display: inline;"> In this work we test the ability of deep learning methods to provide an end-to-end mapping between low and high resolution images applying it to the iris recognition problem. Here, we propose the use of two deep learning single-image super-resolution approaches: Stacked Auto-Encoders (SAE) and Convolutional Neural Networks (CNN) with the most possible lightweight structure to achieve fast speed, p&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.01241v1-abstract-full').style.display = 'inline'; document.getElementById('2311.01241v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.01241v1-abstract-full" style="display: none;"> In this work we test the ability of deep learning methods to provide an end-to-end mapping between low and high resolution images applying it to the iris recognition problem. Here, we propose the use of two deep learning single-image super-resolution approaches: Stacked Auto-Encoders (SAE) and Convolutional Neural Networks (CNN) with the most possible lightweight structure to achieve fast speed, preserve local information and reduce artifacts at the same time. We validate the methods with a database of 1.872 near-infrared iris images with quality assessment and recognition experiments showing the superiority of deep learning approaches over the compared algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.01241v1-abstract-full').style.display = 'none'; document.getElementById('2311.01241v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published at Proc. 25th European Signal Processing Conference, EUSIPCO 2017</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.02067">arXiv:2310.02067</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.02067">pdf</a>, <a href="https://arxiv.org/format/2310.02067">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.patrec.2024.04.009">10.1016/j.patrec.2024.04.009 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Content Bias in Deep Learning Image Age Approximation: A new Approach Towards better Explainability </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=J%C3%B6chl%2C+R">Robert J枚chl</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.02067v3-abstract-short" style="display: inline;"> In the context of temporal image forensics, it is not evident that a neural network, trained on images from different time-slots (classes), exploits solely image age related features. Usually, images taken in close temporal proximity (e.g., belonging to the same age class) share some common content properties. Such content bias can be exploited by a neural network. In this work, a novel approach i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02067v3-abstract-full').style.display = 'inline'; document.getElementById('2310.02067v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.02067v3-abstract-full" style="display: none;"> In the context of temporal image forensics, it is not evident that a neural network, trained on images from different time-slots (classes), exploits solely image age related features. Usually, images taken in close temporal proximity (e.g., belonging to the same age class) share some common content properties. Such content bias can be exploited by a neural network. In this work, a novel approach is proposed that evaluates the influence of image content. This approach is verified using synthetic images (where content bias can be ruled out) with an age signal embedded. Based on the proposed approach, it is shown that a deep learning approach proposed in the context of age classification is most likely highly dependent on the image content. As a possible countermeasure, two different models from the field of image steganalysis, along with three different preprocessing techniques to increase the signal-to-noise ratio (age signal to image content), are evaluated using the proposed method. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.02067v3-abstract-full').style.display = 'none'; document.getElementById('2310.02067v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This is a preprint, the paper is currently under consideration at Pattern Recognition Letters</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Pattern Recognition Letters 182 (2024) 90-96 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.02715">arXiv:2303.02715</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.02715">pdf</a>, <a href="https://arxiv.org/format/2303.02715">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning in the Field of Biometric Template Protection: An Overview </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Rathgeb%2C+C">Christian Rathgeb</a>, <a href="/search/cs?searchtype=author&amp;query=Kolberg%2C+J">Jascha Kolberg</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Busch%2C+C">Christoph Busch</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.02715v1-abstract-short" style="display: inline;"> Today, deep learning represents the most popular and successful form of machine learning. Deep learning has revolutionised the field of pattern recognition, including biometric recognition. Biometric systems utilising deep learning have been shown to achieve auspicious recognition accuracy, surpassing human performance. Apart from said breakthrough advances in terms of biometric performance, the u&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.02715v1-abstract-full').style.display = 'inline'; document.getElementById('2303.02715v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.02715v1-abstract-full" style="display: none;"> Today, deep learning represents the most popular and successful form of machine learning. Deep learning has revolutionised the field of pattern recognition, including biometric recognition. Biometric systems utilising deep learning have been shown to achieve auspicious recognition accuracy, surpassing human performance. Apart from said breakthrough advances in terms of biometric performance, the use of deep learning was reported to impact different covariates of biometrics such as algorithmic fairness, vulnerability to attacks, or template protection. Technologies of biometric template protection are designed to enable a secure and privacy-preserving deployment of biometrics. In the recent past, deep learning techniques have been frequently applied in biometric template protection systems for various purposes. This work provides an overview of how advances in deep learning take influence on the field of biometric template protection. The interrelation between improved biometric performance rates and security in biometric template protection is elaborated. Further, the use of deep learning for obtaining feature representations that are suitable for biometric template protection is discussed. Novel methods that apply deep learning to achieve various goals of biometric template protection are surveyed along with deep learning-based attacks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.02715v1-abstract-full').style.display = 'none'; document.getElementById('2303.02715v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.09973">arXiv:2302.09973</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2302.09973">pdf</a>, <a href="https://arxiv.org/format/2302.09973">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Advanced Image Quality Assessment for Hand- and Fingervein Biometrics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kirchgasser%2C+S">Simon Kirchgasser</a>, <a href="/search/cs?searchtype=author&amp;query=Kauba%2C+C">Christof Kauba</a>, <a href="/search/cs?searchtype=author&amp;query=Wimmer%2C+G">Georg Wimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.09973v2-abstract-short" style="display: inline;"> Natural Scene Statistics commonly used in non-reference image quality measures and a deep learning based quality assessment approach are proposed as biometric quality indicators for vasculature images. While NIQE and BRISQUE if trained on common images with usual distortions do not work well for assessing vasculature pattern samples&#39; quality, their variants being trained on high and low quality va&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.09973v2-abstract-full').style.display = 'inline'; document.getElementById('2302.09973v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.09973v2-abstract-full" style="display: none;"> Natural Scene Statistics commonly used in non-reference image quality measures and a deep learning based quality assessment approach are proposed as biometric quality indicators for vasculature images. While NIQE and BRISQUE if trained on common images with usual distortions do not work well for assessing vasculature pattern samples&#39; quality, their variants being trained on high and low quality vasculature sample data behave as expected from a biometric quality estimator in most cases (deviations from the overall trend occur for certain datasets or feature extraction methods). The proposed deep learning based quality metric is capable of assigning the correct quality class to the vaculature pattern samples in most cases, independent of finger or hand vein patterns being assessed. The experiments were conducted on a total of 13 publicly available finger and hand vein datasets and involve three distinct template representations (two of them especially designed for vascular biometrics). The proposed (trained) quality measures are compared to a several classical quality metrics, with their achieved results underlining their promising behaviour. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.09973v2-abstract-full').style.display = 'none'; document.getElementById('2302.09973v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.05507">arXiv:2211.05507</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.05507">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1049/iet-bmt.2015.0069">10.1049/iet-bmt.2015.0069 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Experimental analysis regarding the influence of iris segmentation on the recognition rate </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hofbauer%2C+H">Heinz Hofbauer</a>, <a href="/search/cs?searchtype=author&amp;query=Alonso-Fernandez%2C+F">Fernando Alonso-Fernandez</a>, <a href="/search/cs?searchtype=author&amp;query=Bigun%2C+J">Josef Bigun</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.05507v1-abstract-short" style="display: inline;"> In this study the authors will look at the detection and segmentation of the iris and its influence on the overall performance of the iris-biometric tool chain. The authors will examine whether the segmentation accuracy, based on conformance with a ground truth, can serve as a predictor for the overall performance of the iris-biometric tool chain. That is: If the segmentation accuracy is improved&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05507v1-abstract-full').style.display = 'inline'; document.getElementById('2211.05507v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.05507v1-abstract-full" style="display: none;"> In this study the authors will look at the detection and segmentation of the iris and its influence on the overall performance of the iris-biometric tool chain. The authors will examine whether the segmentation accuracy, based on conformance with a ground truth, can serve as a predictor for the overall performance of the iris-biometric tool chain. That is: If the segmentation accuracy is improved will this always improve the overall performance? Furthermore, the authors will systematically evaluate the influence of segmentation parameters, pupillary and limbic boundary and normalisation centre (based on Daugman&#39;s rubbersheet model), on the rest of the iris-biometric tool chain. The authors will investigate if accurately finding these parameters is important and how consistency, that is, extracting the same exact region of the iris during segmenting, influences the overall performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05507v1-abstract-full').style.display = 'none'; document.getElementById('2211.05507v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published at IET Biometrics</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.13125">arXiv:2210.13125</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.13125">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1049/iet-bmt.2018.5146">10.1049/iet-bmt.2018.5146 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Iris super-resolution using CNNs: is photo-realism important to iris recognition? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ribeiro%2C+E">Eduardo Ribeiro</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Alonso-Fernandez%2C+F">Fernando Alonso-Fernandez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.13125v1-abstract-short" style="display: inline;"> The use of low-resolution images adopting more relaxed acquisition conditions such as mobile phones and surveillance videos is becoming increasingly common in iris recognition nowadays. Concurrently, a great variety of single image super-resolution techniques are emerging, especially with the use of convolutional neural networks (CNNs). The main objective of these methods is to try to recover fine&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.13125v1-abstract-full').style.display = 'inline'; document.getElementById('2210.13125v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.13125v1-abstract-full" style="display: none;"> The use of low-resolution images adopting more relaxed acquisition conditions such as mobile phones and surveillance videos is becoming increasingly common in iris recognition nowadays. Concurrently, a great variety of single image super-resolution techniques are emerging, especially with the use of convolutional neural networks (CNNs). The main objective of these methods is to try to recover finer texture details generating more photo-realistic images based on the optimisation of an objective function depending basically on the CNN architecture and training approach. In this work, the authors explore single image super-resolution using CNNs for iris recognition. For this, they test different CNN architectures and use different training databases, validating their approach on a database of 1.872 near infrared iris images and on a mobile phone image database. They also use quality assessment, visual results and recognition experiments to verify if the photo-realism provided by the CNNs which have already proven to be effective for natural images can reflect in a better recognition rate for iris recognition. The results show that using deeper architectures trained with texture databases that provide a balance between edge preservation and the smoothness of the method can lead to good results in the iris recognition process. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.13125v1-abstract-full').style.display = 'none'; document.getElementById('2210.13125v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published at IET Biometrics</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.11129">arXiv:2210.11129</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.11129">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ISBA.2019.8778581">10.1109/ISBA.2019.8778581 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Super-Resolution and Image Re-projection for Iris Recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ribeiro%2C+E">Eduardo Ribeiro</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Alonso-Fernandez%2C+F">Fernando Alonso-Fernandez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.11129v1-abstract-short" style="display: inline;"> Several recent works have addressed the ability of deep learning to disclose rich, hierarchical and discriminative models for the most diverse purposes. Specifically in the super-resolution field, Convolutional Neural Networks (CNNs) using different deep learning approaches attempt to recover realistic texture and fine grained details from low resolution images. In this work we explore the viabili&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.11129v1-abstract-full').style.display = 'inline'; document.getElementById('2210.11129v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.11129v1-abstract-full" style="display: none;"> Several recent works have addressed the ability of deep learning to disclose rich, hierarchical and discriminative models for the most diverse purposes. Specifically in the super-resolution field, Convolutional Neural Networks (CNNs) using different deep learning approaches attempt to recover realistic texture and fine grained details from low resolution images. In this work we explore the viability of these approaches for iris Super-Resolution (SR) in an iris recognition environment. For this, we test different architectures with and without a so called image re-projection to reduce artifacts applying it to different iris databases to verify the viability of the different CNNs for iris super-resolution. Results show that CNNs and image re-projection can improve the results specially for the accuracy of recognition systems using a complete different training database performing the transfer learning successfully. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.11129v1-abstract-full').style.display = 'none'; document.getElementById('2210.11129v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published at IEEE International Conference on Identity, Security and Behavior Analysis, ISBA 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.08972">arXiv:2203.08972</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.08972">pdf</a>, <a href="https://arxiv.org/ps/2203.08972">ps</a>, <a href="https://arxiv.org/format/2203.08972">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-981-19-5288-3_17">10.1007/978-981-19-5288-3_17 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Extensive Threat Analysis of Vein Attack Databases and Attack Detection by Fusion of Comparison Scores </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Schuiki%2C+J">Johannes Schuiki</a>, <a href="/search/cs?searchtype=author&amp;query=Linortner%2C+M">Michael Linortner</a>, <a href="/search/cs?searchtype=author&amp;query=Wimmer%2C+G">Georg Wimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.08972v2-abstract-short" style="display: inline;"> The last decade has brought forward many great contributions regarding presentation attack detection for the domain of finger and hand vein biometrics. Among those contributions, one is able to find a variety of different attack databases that are either private or made publicly available to the research community. However, it is not always shown whether the used attack samples hold the capability&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.08972v2-abstract-full').style.display = 'inline'; document.getElementById('2203.08972v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.08972v2-abstract-full" style="display: none;"> The last decade has brought forward many great contributions regarding presentation attack detection for the domain of finger and hand vein biometrics. Among those contributions, one is able to find a variety of different attack databases that are either private or made publicly available to the research community. However, it is not always shown whether the used attack samples hold the capability to actually deceive a realistic vein recognition system. Inspired by previous works, this study provides a systematic threat evaluation including three publicly available finger vein attack databases and one private dorsal hand vein database. To do so, 14 distinct vein recognition schemes are confronted with attack samples and the percentage of wrongly accepted attack samples is then reported as the Impostor Attack Presentation Match Rate. As a second step, comparison scores from different recognition schemes are combined using score level fusion with the goal of performing presentation attack detection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.08972v2-abstract-full').style.display = 'none'; document.getElementById('2203.08972v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This is a preprint of a chapter published in Handbook of Biometric Anti-Spoofing Third Edition: Presentation Attack Detection and Vulnerability Assessment, edited by Marcel, S., Fierrez, J., Evans, N., 2023, Springer, Singapore reproduced with permission of Springer Nature Singapore Pte Ltd</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.02493">arXiv:2111.02493</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.02493">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1088/1361-6501/ac2dbd">10.1088/1361-6501/ac2dbd <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Roadmap on Signal Processing for Next Generation Measurement Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Iakovidis%2C+D+K">D. K. Iakovidis</a>, <a href="/search/cs?searchtype=author&amp;query=Ooi%2C+M">M. Ooi</a>, <a href="/search/cs?searchtype=author&amp;query=Kuang%2C+Y+C">Y. C. Kuang</a>, <a href="/search/cs?searchtype=author&amp;query=Demidenko%2C+S">S. Demidenko</a>, <a href="/search/cs?searchtype=author&amp;query=Shestakov%2C+A">A. Shestakov</a>, <a href="/search/cs?searchtype=author&amp;query=Sinitsin%2C+V">V. Sinitsin</a>, <a href="/search/cs?searchtype=author&amp;query=Henry%2C+M">M. Henry</a>, <a href="/search/cs?searchtype=author&amp;query=Sciacchitano%2C+A">A. Sciacchitano</a>, <a href="/search/cs?searchtype=author&amp;query=Discetti%2C+A">A. Discetti</a>, <a href="/search/cs?searchtype=author&amp;query=Donati%2C+S">S. Donati</a>, <a href="/search/cs?searchtype=author&amp;query=Norgia%2C+M">M. Norgia</a>, <a href="/search/cs?searchtype=author&amp;query=Menychtas%2C+A">A. Menychtas</a>, <a href="/search/cs?searchtype=author&amp;query=Maglogiannis%2C+I">I. Maglogiannis</a>, <a href="/search/cs?searchtype=author&amp;query=Wriessnegger%2C+S+C">S. C. Wriessnegger</a>, <a href="/search/cs?searchtype=author&amp;query=Chacon%2C+L+A+B">L. A. Barradas Chacon</a>, <a href="/search/cs?searchtype=author&amp;query=Dimas%2C+G">G. Dimas</a>, <a href="/search/cs?searchtype=author&amp;query=Filos%2C+D">D. Filos</a>, <a href="/search/cs?searchtype=author&amp;query=Aletras%2C+A+H">A. H. Aletras</a>, <a href="/search/cs?searchtype=author&amp;query=T%C3%B6ger%2C+J">J. T枚ger</a>, <a href="/search/cs?searchtype=author&amp;query=Dong%2C+F">F. Dong</a>, <a href="/search/cs?searchtype=author&amp;query=Ren%2C+S">S. Ren</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">A. Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Paziewski%2C+J">J. Paziewski</a>, <a href="/search/cs?searchtype=author&amp;query=Geng%2C+J">J. Geng</a>, <a href="/search/cs?searchtype=author&amp;query=Fioranelli%2C+F">F. Fioranelli</a> , et al. (9 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.02493v3-abstract-short" style="display: inline;"> Signal processing is a fundamental component of almost any sensor-enabled system, with a wide range of applications across different scientific disciplines. Time series data, images, and video sequences comprise representative forms of signals that can be enhanced and analysed for information extraction and quantification. The recent advances in artificial intelligence and machine learning are shi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.02493v3-abstract-full').style.display = 'inline'; document.getElementById('2111.02493v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.02493v3-abstract-full" style="display: none;"> Signal processing is a fundamental component of almost any sensor-enabled system, with a wide range of applications across different scientific disciplines. Time series data, images, and video sequences comprise representative forms of signals that can be enhanced and analysed for information extraction and quantification. The recent advances in artificial intelligence and machine learning are shifting the research attention towards intelligent, data-driven, signal processing. This roadmap presents a critical overview of the state-of-the-art methods and applications aiming to highlight future challenges and research opportunities towards next generation measurement systems. It covers a broad spectrum of topics ranging from basic to industrial research, organized in concise thematic sections that reflect the trends and the impacts of current and future developments per research field. Furthermore, it offers guidance to researchers and funding agencies in identifying new prospects. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.02493v3-abstract-full').style.display = 'none'; document.getElementById('2111.02493v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">48 pages, https://iopscience.iop.org/article/10.1088/1361-6501/ac2dbd</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Measurement Science and Technology 33(1) (2022) 1-48 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.01632">arXiv:2103.01632</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.01632">pdf</a>, <a href="https://arxiv.org/format/2103.01632">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Using CNNs to Identify the Origin of Finger Vein Image </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maser%2C+B">Babak Maser</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.01632v1-abstract-short" style="display: inline;"> We study the finger vein (FV) sensor model identification task using a deep learning approach. So far, for this biometric modality, only correlation-based PRNU and texture descriptor-based methods have been applied. We employ five prominent CNN architectures covering a wide range of CNN family models, including VGG16, ResNet, and the Xception model. In addition, a novel architecture termed FV2021&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.01632v1-abstract-full').style.display = 'inline'; document.getElementById('2103.01632v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.01632v1-abstract-full" style="display: none;"> We study the finger vein (FV) sensor model identification task using a deep learning approach. So far, for this biometric modality, only correlation-based PRNU and texture descriptor-based methods have been applied. We employ five prominent CNN architectures covering a wide range of CNN family models, including VGG16, ResNet, and the Xception model. In addition, a novel architecture termed FV2021 is proposed in this work, which excels by its compactness and a low number of parameters to be trained. Original samples, as well as the region of interest data from eight publicly accessible FV datasets, are used in experimentation. An excellent sensor identification AUC-ROC score of 1.0 for patches of uncropped samples and 0.9997 for ROI samples have been achieved. The comparison with former methods shows that the CNN-based approach is superior and improved the results. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.01632v1-abstract-full').style.display = 'none'; document.getElementById('2103.01632v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.03992">arXiv:2102.03992</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.03992">pdf</a>, <a href="https://arxiv.org/format/2102.03992">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Identifying the Origin of Finger Vein Samples Using Texture Descriptors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maser%2C+B">Babak Maser</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.03992v1-abstract-short" style="display: inline;"> Identifying the origin of a sample image in biometric systems can be beneficial for data authentication in case of attacks against the system and for initiating sensor-specific processing pipelines in sensor-heterogeneous environments. Motivated by shortcomings of the photo response non-uniformity (PRNU) based method in the biometric context, we use a texture classification approach to detect the&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.03992v1-abstract-full').style.display = 'inline'; document.getElementById('2102.03992v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.03992v1-abstract-full" style="display: none;"> Identifying the origin of a sample image in biometric systems can be beneficial for data authentication in case of attacks against the system and for initiating sensor-specific processing pipelines in sensor-heterogeneous environments. Motivated by shortcomings of the photo response non-uniformity (PRNU) based method in the biometric context, we use a texture classification approach to detect the origin of finger vein sample images. Based on eight publicly available finger vein datasets and applying eight classical yet simple texture descriptors and SVM classification, we demonstrate excellent sensor model identification results for raw finger vein samples as well as for the more challenging region of interest data. The observed results establish texture descriptors as effective competitors to PRNU in finger vein sensor model identification. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.03992v1-abstract-full').style.display = 'none'; document.getElementById('2102.03992v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.04450">arXiv:2101.04450</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2101.04450">pdf</a>, <a href="https://arxiv.org/format/2101.04450">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Two-stage CNN-based wood log recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wimmer%2C+G">Georg Wimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Schraml%2C+R">Rudolf Schraml</a>, <a href="/search/cs?searchtype=author&amp;query=Hofbauer%2C+H">Heinz Hofbauer</a>, <a href="/search/cs?searchtype=author&amp;query=Petutschnigg%2C+A">Alexander Petutschnigg</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.04450v1-abstract-short" style="display: inline;"> The proof of origin of logs is becoming increasingly important. In the context of Industry 4.0 and to combat illegal logging there is an increasing motivation to track each individual log. Our previous works in this field focused on log tracking using digital log end images based on methods inspired by fingerprint and iris-recognition. This work presents a convolutional neural network (CNN) based&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.04450v1-abstract-full').style.display = 'inline'; document.getElementById('2101.04450v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.04450v1-abstract-full" style="display: none;"> The proof of origin of logs is becoming increasingly important. In the context of Industry 4.0 and to combat illegal logging there is an increasing motivation to track each individual log. Our previous works in this field focused on log tracking using digital log end images based on methods inspired by fingerprint and iris-recognition. This work presents a convolutional neural network (CNN) based approach which comprises a CNN-based segmentation of the log end combined with a final CNN-based recognition of the segmented log end using the triplet loss function for CNN training. Results show that the proposed two-stage CNN-based approach outperforms traditional approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.04450v1-abstract-full').style.display = 'none'; document.getElementById('2101.04450v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to ICIP 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.00606">arXiv:2012.00606</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2012.00606">pdf</a>, <a href="https://arxiv.org/format/2012.00606">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Enabling Fingerprint Presentation Attacks: Fake Fingerprint Fabrication Techniques and Recognition Performance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kauba%2C+C">Christof Kauba</a>, <a href="/search/cs?searchtype=author&amp;query=Debiasi%2C+L">Luca Debiasi</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.00606v1-abstract-short" style="display: inline;"> Fake fingerprint representation pose a severe threat for fingerprint based authentication systems. Despite advances in presentation attack detection technologies, which are often integrated directly into the fingerprint scanner devices, many fingerprint scanners are still susceptible to presentation attacks using physical fake fingerprint representation. In this work we evaluate five different com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.00606v1-abstract-full').style.display = 'inline'; document.getElementById('2012.00606v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.00606v1-abstract-full" style="display: none;"> Fake fingerprint representation pose a severe threat for fingerprint based authentication systems. Despite advances in presentation attack detection technologies, which are often integrated directly into the fingerprint scanner devices, many fingerprint scanners are still susceptible to presentation attacks using physical fake fingerprint representation. In this work we evaluate five different commercial-off-the-shelf fingerprint scanners based on different sensing technologies, including optical, optical multispectral, passive capacitive, active capacitive and thermal regarding their susceptibility to presentation attacks using fake fingerprint representations. Several different materials to create the fake representation are tested and evaluated, including wax, cast, latex, silicone, different types of glue, window colours, modelling clay, etc. The quantitative evaluation includes assessing the fingerprint quality of the samples captured from the fake representations as well as comparison experiments where the achieved matching scores of the fake representations against the corresponding real fingerprints indicate the effectiveness of the fake representations. Our results confirmed that all except one of the tested devices are susceptible to at least one type/material of fake fingerprint representations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.00606v1-abstract-full').style.display = 'none'; document.getElementById('2012.00606v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">23 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.5.4 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.12604">arXiv:2004.12604</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2004.12604">pdf</a>, <a href="https://arxiv.org/format/2004.12604">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Improving Endoscopic Decision Support Systems by Translating Between Imaging Modalities </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wimmer%2C+G">Georg Wimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Gadermayr%2C+M">Michael Gadermayr</a>, <a href="/search/cs?searchtype=author&amp;query=V%C3%A9csei%2C+A">Andreas V茅csei</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.12604v1-abstract-short" style="display: inline;"> Novel imaging technologies raise many questions concerning the adaptation of computer-aided decision support systems. Classification models either need to be adapted or even newly trained from scratch to exploit the full potential of enhanced techniques. Both options typically require the acquisition of new labeled training data. In this work we investigate the applicability of image-to-image tran&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.12604v1-abstract-full').style.display = 'inline'; document.getElementById('2004.12604v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.12604v1-abstract-full" style="display: none;"> Novel imaging technologies raise many questions concerning the adaptation of computer-aided decision support systems. Classification models either need to be adapted or even newly trained from scratch to exploit the full potential of enhanced techniques. Both options typically require the acquisition of new labeled training data. In this work we investigate the applicability of image-to-image translation to endoscopic images showing different imaging modalities, namely conventional white-light and narrow-band imaging. In a study on computer-aided celiac disease diagnosis, we explore whether image-to-image translation is capable of effectively performing the translation between the domains. We investigate if models can be trained on virtual (or a mixture of virtual and real) samples to improve overall accuracy in a setting with limited labeled training data. Finally, we also ask whether a translation of testing images to another domain is capable of improving accuracy by exploiting the enhanced imaging characteristics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.12604v1-abstract-full').style.display = 'none'; document.getElementById('2004.12604v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to MICCAI 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.01418">arXiv:2004.01418</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2004.01418">pdf</a>, <a href="https://arxiv.org/format/2004.01418">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.23919/Eusipco47968.2020.9287722">10.23919/Eusipco47968.2020.9287722 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Demographic Bias: A Challenge for Fingervein Recognition Systems? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Drozdowski%2C+P">P. Drozdowski</a>, <a href="/search/cs?searchtype=author&amp;query=Prommegger%2C+B">B. Prommegger</a>, <a href="/search/cs?searchtype=author&amp;query=Wimmer%2C+G">G. Wimmer</a>, <a href="/search/cs?searchtype=author&amp;query=Schraml%2C+R">R. Schraml</a>, <a href="/search/cs?searchtype=author&amp;query=Rathgeb%2C+C">C. Rathgeb</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">A. Uhl</a>, <a href="/search/cs?searchtype=author&amp;query=Busch%2C+C">C. Busch</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.01418v1-abstract-short" style="display: inline;"> Recently, concerns regarding potential biases in the underlying algorithms of many automated systems (including biometrics) have been raised. In this context, a biased algorithm produces statistically different outcomes for different groups of individuals based on certain (often protected by anti-discrimination legislation) attributes such as sex and age. While several preliminary studies investig&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01418v1-abstract-full').style.display = 'inline'; document.getElementById('2004.01418v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.01418v1-abstract-full" style="display: none;"> Recently, concerns regarding potential biases in the underlying algorithms of many automated systems (including biometrics) have been raised. In this context, a biased algorithm produces statistically different outcomes for different groups of individuals based on certain (often protected by anti-discrimination legislation) attributes such as sex and age. While several preliminary studies investigating this matter for facial recognition algorithms do exist, said topic has not yet been addressed for vascular biometric characteristics. Accordingly, in this paper, several popular types of recognition algorithms are benchmarked to ascertain the matter for fingervein recognition. The experimental evaluation suggests lack of bias for the tested algorithms, although future works with larger datasets are needed to validate and confirm those preliminary results. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01418v1-abstract-full').style.display = 'none'; document.getElementById('2004.01418v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 2 figures, 8 tables. Submitted to European Signal Processing Conference (EUSIPCO) -- special session on bias in biometrics</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.04041">arXiv:1707.04041</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1707.04041">pdf</a>, <a href="https://arxiv.org/format/1707.04041">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Algebraic Topology">math.AT</span> </div> </div> <p class="title is-5 mathjax"> Deep Learning with Topological Signatures </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hofer%2C+C">Christoph Hofer</a>, <a href="/search/cs?searchtype=author&amp;query=Kwitt%2C+R">Roland Kwitt</a>, <a href="/search/cs?searchtype=author&amp;query=Niethammer%2C+M">Marc Niethammer</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1707.04041v3-abstract-short" style="display: inline;"> Inferring topological and geometrical information from data can offer an alternative perspective on machine learning problems. Methods from topological data analysis, e.g., persistent homology, enable us to obtain such information, typically in the form of summary representations of topological features. However, such topological signatures often come with an unusual structure (e.g., multisets of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.04041v3-abstract-full').style.display = 'inline'; document.getElementById('1707.04041v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1707.04041v3-abstract-full" style="display: none;"> Inferring topological and geometrical information from data can offer an alternative perspective on machine learning problems. Methods from topological data analysis, e.g., persistent homology, enable us to obtain such information, typically in the form of summary representations of topological features. However, such topological signatures often come with an unusual structure (e.g., multisets of intervals) that is highly impractical for most machine learning techniques. While many strategies have been proposed to map these topological signatures into machine learning compatible representations, they suffer from being agnostic to the target learning task. In contrast, we propose a technique that enables us to input topological signatures to deep neural networks and learn a task-optimal representation during training. Our approach is realized as a novel input layer with favorable theoretical properties. Classification experiments on 2D object shapes and social network graphs demonstrate the versatility of the approach and, in case of the latter, we even outperform the state-of-the-art by a large margin. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.04041v3-abstract-full').style.display = 'none'; document.getElementById('1707.04041v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 February, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1505.01065">arXiv:1505.01065</a> <span>&nbsp;&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Proceedings of The 39th Annual Workshop of the Austrian Association for Pattern Recognition (OAGM), 2015 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hegenbart%2C+S">Sebastian Hegenbart</a>, <a href="/search/cs?searchtype=author&amp;query=Kwitt%2C+R">Roland Kwitt</a>, <a href="/search/cs?searchtype=author&amp;query=Uhl%2C+A">Andreas Uhl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1505.01065v1-abstract-short" style="display: inline;"> The 39th annual workshop of the Austrian Association for Pattern Recognition (OAGM/AAPR) provides a platform for presentation and discussion of research progress as well as research projects within the OAGM/AAPR community. </span> <span class="abstract-full has-text-grey-dark mathjax" id="1505.01065v1-abstract-full" style="display: none;"> The 39th annual workshop of the Austrian Association for Pattern Recognition (OAGM/AAPR) provides a platform for presentation and discussion of research progress as well as research projects within the OAGM/AAPR community. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1505.01065v1-abstract-full').style.display = 'none'; document.getElementById('1505.01065v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 April, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2015. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Index submitted before individual papers</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10