<!-- Saved snapshot (via cinxe.com): Search | arXiv e-print repository -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;17 of 17 results for author: <span class="mathjax">Orrù, G</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Orr%C3%B9%2C+G">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Orrù, G"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Orr%C3%B9%2C+G&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Orrù, G"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.16341">arXiv:2410.16341</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.16341">pdf</a>, <a href="https://arxiv.org/format/2410.16341">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Vulnerabilities in Machine Learning-Based Voice Disorder Detection Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Perelli%2C+G">Gianpaolo Perelli</a>, <a href="/search/cs?searchtype=author&amp;query=Panzino%2C+A">Andrea Panzino</a>, <a href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.16341v1-abstract-short" style="display: inline;"> The impact of voice disorders is becoming more widely acknowledged as a public health issue. Several machine learning-based classifiers with the potential to identify disorders have been used in recent studies to differentiate between normal and pathological voices and sounds. In this paper, we focus on analyzing the vulnerabilities of these systems by exploring the possibility of attacks that can&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.16341v1-abstract-full').style.display = 'inline'; document.getElementById('2410.16341v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.16341v1-abstract-full" style="display: none;"> The impact of voice disorders is becoming more widely acknowledged as a public health issue. Several machine learning-based classifiers with the potential to identify disorders have been used in recent studies to differentiate between normal and pathological voices and sounds. In this paper, we focus on analyzing the vulnerabilities of these systems by exploring the possibility of attacks that can reverse classification and compromise their reliability. Given the critical nature of personal health information, understanding which types of attacks are effective is a necessary first step toward improving the security of such systems. Starting from the original audios, we implement various attack methods, including adversarial, evasion, and pitching techniques, and evaluate how state-of-the-art disorder detection models respond to them. Our findings identify the most effective attack strategies, underscoring the need to address these vulnerabilities in machine-learning systems used in the healthcare domain. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.16341v1-abstract-full').style.display = 'none'; document.getElementById('2410.16341v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages, 17 figures, accepted for 16th IEEE INTERNATIONAL WORKSHOP ON INFORMATION FORENSICS AND SECURITY (WIFS) 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.10481">arXiv:2409.10481</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.10481">pdf</a>, <a href="https://arxiv.org/format/2409.10481">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Exploring 3D Face Reconstruction and Fusion Methods for Face Verification: A Case-Study in Video Surveillance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=La+Cava%2C+S+M">Simone Maurizio La Cava</a>, <a href="/search/cs?searchtype=author&amp;query=Concas%2C+S">Sara Concas</a>, <a href="/search/cs?searchtype=author&amp;query=Tolosana%2C+R">Ruben Tolosana</a>, <a href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a 
href="/search/cs?searchtype=author&amp;query=Drahansky%2C+M">Martin Drahansky</a>, <a href="/search/cs?searchtype=author&amp;query=Fierrez%2C+J">Julian Fierrez</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.10481v1-abstract-short" style="display: inline;"> 3D face reconstruction (3DFR) algorithms are based on specific assumptions tailored to distinct application scenarios. These assumptions limit their use when acquisition conditions, such as the subject&#39;s distance from the camera or the camera&#39;s characteristics, are different than expected, as typically happens in video surveillance. Additionally, 3DFR algorithms follow various strategies to addres&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.10481v1-abstract-full').style.display = 'inline'; document.getElementById('2409.10481v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.10481v1-abstract-full" style="display: none;"> 3D face reconstruction (3DFR) algorithms are based on specific assumptions tailored to distinct application scenarios. These assumptions limit their use when acquisition conditions, such as the subject&#39;s distance from the camera or the camera&#39;s characteristics, are different than expected, as typically happens in video surveillance. Additionally, 3DFR algorithms follow various strategies to address the reconstruction of a 3D shape from 2D data, such as statistical model fitting, photometric stereo, or deep learning. In the present study, we explore the application of three 3DFR algorithms representative of the SOTA, employing each one as the template set generator for a face verification system. 
The scores provided by each system are combined by score-level fusion. We show that the complementarity induced by different 3DFR algorithms improves performance when tests are conducted at never-seen-before distances from the camera and camera characteristics (cross-distance and cross-camera settings), thus encouraging further investigations on multiple 3DFR-based approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.10481v1-abstract-full').style.display = 'none'; document.getElementById('2409.10481v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at T-CAP - Towards a Complete Analysis of People: Fine-grained Understanding for Real-World Applications, workshop in conjunction with the 18th European Conference on Computer Vision ECCV 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.04580">arXiv:2404.04580</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.04580">pdf</a>, <a href="https://arxiv.org/format/2404.04580">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> SDFR: Synthetic Data for Face Recognition Competition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shahreza%2C+H+O">Hatef Otroshi Shahreza</a>, <a 
href="/search/cs?searchtype=author&amp;query=Ecabert%2C+C">Christophe Ecabert</a>, <a href="/search/cs?searchtype=author&amp;query=George%2C+A">Anjith George</a>, <a href="/search/cs?searchtype=author&amp;query=Unnervik%2C+A">Alexander Unnervik</a>, <a href="/search/cs?searchtype=author&amp;query=Marcel%2C+S">Sébastien Marcel</a>, <a href="/search/cs?searchtype=author&amp;query=Di+Domenico%2C+N">Nicolò Di Domenico</a>, <a href="/search/cs?searchtype=author&amp;query=Borghi%2C+G">Guido Borghi</a>, <a href="/search/cs?searchtype=author&amp;query=Maltoni%2C+D">Davide Maltoni</a>, <a href="/search/cs?searchtype=author&amp;query=Boutros%2C+F">Fadi Boutros</a>, <a href="/search/cs?searchtype=author&amp;query=Vogel%2C+J">Julia Vogel</a>, <a href="/search/cs?searchtype=author&amp;query=Damer%2C+N">Naser Damer</a>, <a href="/search/cs?searchtype=author&amp;query=S%C3%A1nchez-P%C3%A9rez%2C+%C3%81">Ángela Sánchez-Pérez</a>, <a href="/search/cs?searchtype=author&amp;query=EnriqueMas-Candela"> EnriqueMas-Candela</a>, <a href="/search/cs?searchtype=author&amp;query=Calvo-Zaragoza%2C+J">Jorge Calvo-Zaragoza</a>, <a href="/search/cs?searchtype=author&amp;query=Biesseck%2C+B">Bernardo Biesseck</a>, <a href="/search/cs?searchtype=author&amp;query=Vidal%2C+P">Pedro Vidal</a>, <a href="/search/cs?searchtype=author&amp;query=Granada%2C+R">Roger Granada</a>, <a href="/search/cs?searchtype=author&amp;query=Menotti%2C+D">David Menotti</a>, <a href="/search/cs?searchtype=author&amp;query=DeAndres-Tame%2C+I">Ivan DeAndres-Tame</a>, <a href="/search/cs?searchtype=author&amp;query=La+Cava%2C+S+M">Simone Maurizio La Cava</a>, <a href="/search/cs?searchtype=author&amp;query=Concas%2C+S">Sara Concas</a>, <a href="/search/cs?searchtype=author&amp;query=Melzi%2C+P">Pietro Melzi</a>, <a href="/search/cs?searchtype=author&amp;query=Tolosana%2C+R">Ruben Tolosana</a>, <a href="/search/cs?searchtype=author&amp;query=Vera-Rodriguez%2C+R">Ruben Vera-Rodriguez</a>, <a 
href="/search/cs?searchtype=author&amp;query=Perelli%2C+G">Gianpaolo Perelli</a> , et al. (3 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.04580v2-abstract-short" style="display: inline;"> Large-scale face recognition datasets are collected by crawling the Internet and without individuals&#39; consent, raising legal, ethical, and privacy concerns. With the recent advances in generative models, recently several works proposed generating synthetic face recognition datasets to mitigate concerns in web-crawled face recognition datasets. This paper presents the summary of the Synthetic Data&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.04580v2-abstract-full').style.display = 'inline'; document.getElementById('2404.04580v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.04580v2-abstract-full" style="display: none;"> Large-scale face recognition datasets are collected by crawling the Internet and without individuals&#39; consent, raising legal, ethical, and privacy concerns. With the recent advances in generative models, recently several works proposed generating synthetic face recognition datasets to mitigate concerns in web-crawled face recognition datasets. This paper presents the summary of the Synthetic Data for Face Recognition (SDFR) Competition held in conjunction with the 18th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2024) and established to investigate the use of synthetic data for training face recognition models. The SDFR competition was split into two tasks, allowing participants to train face recognition systems using new synthetic datasets and/or existing ones. 
In the first task, the face recognition backbone was fixed and the dataset size was limited, while the second task provided almost complete freedom on the model backbone, the dataset, and the training pipeline. The submitted models were trained on existing and also new synthetic datasets and used clever methods to improve training with synthetic data. The submissions were evaluated and ranked on a diverse set of seven benchmarking datasets. The paper gives an overview of the submitted face recognition models and reports achieved performance compared to baseline models trained on real and synthetic datasets. Furthermore, the evaluation of submissions is extended to bias assessment across different demography groups. Lastly, an outlook on the current state of the research in training face recognition models using synthetic data is presented, and existing problems as well as potential future directions are also discussed. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.04580v2-abstract-full').style.display = 'none'; document.getElementById('2404.04580v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The 18th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.15578">arXiv:2309.15578</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.15578">pdf</a>, <a href="https://arxiv.org/ps/2309.15578">ps</a>, <a href="https://arxiv.org/format/2309.15578">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LivDet2023 -- Fingerprint Liveness Detection Competition: Advancing Generalization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Carta%2C+S">Simone Carta</a>, <a href="/search/cs?searchtype=author&amp;query=Concas%2C+S">Sara Concas</a>, <a href="/search/cs?searchtype=author&amp;query=La+Cava%2C+S+M">Simone Maurizio La Cava</a>, <a href="/search/cs?searchtype=author&amp;query=Fierrez%2C+J">Julian Fierrez</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.15578v1-abstract-short" style="display: inline;"> The International Fingerprint Liveness Detection Competition (LivDet) is a biennial event that invites academic and industry participants to 
prove their advancements in Fingerprint Presentation Attack Detection (PAD). This edition, LivDet2023, proposed two challenges, Liveness Detection in Action and Fingerprint Representation, to evaluate the efficacy of PAD embedded in verification systems and t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.15578v1-abstract-full').style.display = 'inline'; document.getElementById('2309.15578v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.15578v1-abstract-full" style="display: none;"> The International Fingerprint Liveness Detection Competition (LivDet) is a biennial event that invites academic and industry participants to prove their advancements in Fingerprint Presentation Attack Detection (PAD). This edition, LivDet2023, proposed two challenges, Liveness Detection in Action and Fingerprint Representation, to evaluate the efficacy of PAD embedded in verification systems and the effectiveness and compactness of feature sets. A third, hidden challenge is the inclusion of two subsets in the training set whose sensor information is unknown, testing participants ability to generalize their models. Only bona fide fingerprint samples were provided to participants, and the competition reports and assesses the performance of their algorithms suffering from this limitation in data availability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.15578v1-abstract-full').style.display = 'none'; document.getElementById('2309.15578v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 10 tables, IEEE International Joint Conference on Biometrics (IJCB 2023)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.11357">arXiv:2309.11357</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.11357">pdf</a>, <a href="https://arxiv.org/format/2309.11357">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> 3D Face Reconstruction: the Road to Forensics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=La+Cava%2C+S+M">Simone Maurizio La Cava</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Drahansky%2C+M">Martin Drahansky</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a>, <a href="/search/cs?searchtype=author&amp;query=Roli%2C+F">Fabio Roli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.11357v1-abstract-short" style="display: inline;"> 3D face reconstruction algorithms from images and videos are applied to many fields, from plastic surgery to the entertainment sector, thanks to their 
advantageous features. However, when looking at forensic applications, 3D face reconstruction must observe strict requirements that still make its possible role in bringing evidence to a lawsuit unclear. An extensive investigation of the constraints&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.11357v1-abstract-full').style.display = 'inline'; document.getElementById('2309.11357v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.11357v1-abstract-full" style="display: none;"> 3D face reconstruction algorithms from images and videos are applied to many fields, from plastic surgery to the entertainment sector, thanks to their advantageous features. However, when looking at forensic applications, 3D face reconstruction must observe strict requirements that still make its possible role in bringing evidence to a lawsuit unclear. An extensive investigation of the constraints, potential, and limits of its application in forensics is still missing. Shedding some light on this matter is the goal of the present survey, which starts by clarifying the relation between forensic applications and biometrics, with a focus on face recognition. Therefore, it provides an analysis of the achievements of 3D face reconstruction algorithms from surveillance videos and mugshot images and discusses the current obstacles that separate 3D face reconstruction from an active role in forensic applications. Finally, it examines the underlying data sets, with their advantages and limitations, while proposing alternatives that could substitute or complement them. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.11357v1-abstract-full').style.display = 'none'; document.getElementById('2309.11357v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The manuscript has been accepted for publication in ACM Computing Surveys. arXiv admin note: text overlap with arXiv:2303.11164</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.11164">arXiv:2303.11164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.11164">pdf</a>, <a href="https://arxiv.org/ps/2303.11164">ps</a>, <a href="https://arxiv.org/format/2303.11164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ICPR56361.2022.9956031">10.1109/ICPR56361.2022.9956031 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> 3D Face Reconstruction for Forensic Recognition -- A Survey </p> <p class="authors"> <span 
class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=La+Cava%2C+S+M">Simone Maurizio La Cava</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Goldmann%2C+T">Tomáš Goldmann</a>, <a href="/search/cs?searchtype=author&amp;query=Drahansky%2C+M">Martin Drahansky</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.11164v1-abstract-short" style="display: inline;"> 3D face reconstruction algorithms from images and videos are applied to many fields, from plastic surgery to the entertainment sector, thanks to their advantageous features. However, when looking at forensic applications, 3D face reconstruction must observe strict requirements that still make unclear its possible role in bringing evidence to a lawsuit. Shedding some light on this matter is the goa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11164v1-abstract-full').style.display = 'inline'; document.getElementById('2303.11164v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.11164v1-abstract-full" style="display: none;"> 3D face reconstruction algorithms from images and videos are applied to many fields, from plastic surgery to the entertainment sector, thanks to their advantageous features. However, when looking at forensic applications, 3D face reconstruction must observe strict requirements that still make unclear its possible role in bringing evidence to a lawsuit. Shedding some light on this matter is the goal of the present survey, where we start by clarifying the relation between forensic applications and biometrics.
To our knowledge, no previous work adopted this relation to make the point on the state of the art. Therefore, we analyzed the achievements of 3D face reconstruction algorithms from surveillance videos and mugshot images and discussed the current obstacles that separate 3D face reconstruction from an active role in forensic applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11164v1-abstract-full').style.display = 'none'; document.getElementById('2303.11164v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.07259">arXiv:2202.07259</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.07259">pdf</a>, <a href="https://arxiv.org/format/2202.07259">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Review of the Fingerprint Liveness Detection (LivDet) competition series: from 2009 to 2021 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Yambay%2C+D">David Yambay</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a>, <a href="/search/cs?searchtype=author&amp;query=Schuckers%2C+S+C">Stephanie C.
Schuckers</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.07259v1-abstract-short" style="display: inline;"> Fingerprint authentication systems are highly vulnerable to artificial reproductions of fingerprint, called fingerprint presentation attacks. Detecting presentation attacks is not trivial because attackers refine their replication techniques from year to year. The International Fingerprint liveness Detection Competition (LivDet), an open and well-acknowledged meeting point of academies and private&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.07259v1-abstract-full').style.display = 'inline'; document.getElementById('2202.07259v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.07259v1-abstract-full" style="display: none;"> Fingerprint authentication systems are highly vulnerable to artificial reproductions of fingerprint, called fingerprint presentation attacks. Detecting presentation attacks is not trivial because attackers refine their replication techniques from year to year. The International Fingerprint liveness Detection Competition (LivDet), an open and well-acknowledged meeting point of academies and private companies that deal with the problem of presentation attack detection, has the goal to assess the performance of fingerprint presentation attack detection (FPAD) algorithms by using standard experimental protocols and data sets. Each LivDet edition, held biannually since 2009, is characterized by a different set of challenges against which competitors must be dealt with. The continuous increase of competitors and the noticeable decrease in error rates across competitions demonstrate a growing interest in the topic. 
This paper reviews the LivDet editions from 2009 to 2021 and points out their evolution over the years. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.07259v1-abstract-full').style.display = 'none'; document.getElementById('2202.07259v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Chapter of the Handbook of Biometric Anti-Spoofing (Third Edition)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.10567">arXiv:2110.10567</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.10567">pdf</a>, <a href="https://arxiv.org/format/2110.10567">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TIFS.2021.3121201">10.1109/TIFS.2021.3121201 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Fingerprint recognition with embedded presentation attacks detection: are we ready? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Roli%2C+F">Fabio Roli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.10567v1-abstract-short" style="display: inline;"> The diffusion of fingerprint verification systems for security applications makes it urgent to investigate the embedding of software-based presentation attack detection algorithms (PAD) into such systems. Companies and institutions need to know whether such integration would make the system more &#34;secure&#34; and whether the technology available is ready, and, if so, at what operational working conditi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10567v1-abstract-full').style.display = 'inline'; document.getElementById('2110.10567v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.10567v1-abstract-full" style="display: none;"> The diffusion of fingerprint verification systems for security applications makes it urgent to investigate the embedding of software-based presentation attack detection algorithms (PAD) into such systems. Companies and institutions need to know whether such integration would make the system more &#34;secure&#34; and whether the technology available is ready, and, if so, at what operational working conditions.
Despite significant improvements, especially by adopting deep learning approaches to fingerprint PAD, current research did not state much about their effectiveness when embedded in fingerprint verification systems. We believe that the lack of works is explained by the lack of instruments to investigate the problem, that is, modeling the cause-effect relationships when two non-zero error-free systems work together. Accordingly, this paper explores the fusion of PAD into verification systems by proposing a novel investigation instrument: a performance simulator based on the probabilistic modeling of the relationships among the Receiver Operating Characteristics (ROC) of the two individual systems when PAD and verification stages are implemented sequentially. As a matter of fact, this is the most straightforward, flexible, and widespread approach. We carry out simulations on the PAD algorithms&#39; ROCs submitted to the most recent editions of LivDet (2017-2019), the state-of-the-art NIST Bozorth3, and the top-level Veryfinger 12 matchers. Reported experiments explore significant scenarios to get the conditions under which fingerprint matching with embedded PAD can improve, rather than degrade, the overall personal verification performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10567v1-abstract-full').style.display = 'none'; document.getElementById('2110.10567v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Transactions on Information Forensics and Security (2021) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.10183">arXiv:2108.10183</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2108.10183">pdf</a>, <a href="https://arxiv.org/format/2108.10183">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/IJCB52358.2021.9484399">10.1109/IJCB52358.2021.9484399 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> LivDet 2021 Fingerprint Liveness Detection Competition -- Into the unknown </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Delussu%2C+R">Rita Delussu</a>, <a href="/search/cs?searchtype=author&amp;query=Concas%2C+S">Sara Concas</a>, <a href="/search/cs?searchtype=author&amp;query=Panzino%2C+A">Andrea Panzino</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.10183v1-abstract-short" style="display: inline;"> The
International Fingerprint Liveness Detection Competition is an international biennial competition open to academia and industry with the aim to assess and report advances in Fingerprint Presentation Attack Detection. The proposed &#34;Liveness Detection in Action&#34; and &#34;Fingerprint representation&#34; challenges were aimed to evaluate the impact of a PAD embedded into a verification system, and the eff&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.10183v1-abstract-full').style.display = 'inline'; document.getElementById('2108.10183v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.10183v1-abstract-full" style="display: none;"> The International Fingerprint Liveness Detection Competition is an international biennial competition open to academia and industry with the aim to assess and report advances in Fingerprint Presentation Attack Detection. The proposed &#34;Liveness Detection in Action&#34; and &#34;Fingerprint representation&#34; challenges were aimed to evaluate the impact of a PAD embedded into a verification system, and the effectiveness and compactness of feature sets for mobile applications. Furthermore, we experimented a new spoof fabrication method that has particularly affected the final results. Twenty-three algorithms were submitted to the competition, the maximum number ever achieved by LivDet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.10183v1-abstract-full').style.display = 'none'; document.getElementById('2108.10183v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint version of a paper accepted at IJCB 2021</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2021 IEEE International Joint Conference on Biometrics (IJCB), 2021, pp. 1-6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.06412">arXiv:2010.06412</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2010.06412">pdf</a>, <a href="https://arxiv.org/format/2010.06412">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Electroencephalography signal processing based on textural features for monitoring the driver&#39;s state by a Brain-Computer Interface </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Terranova%2C+F">Fabio Terranova</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.06412v2-abstract-short" style="display: inline;"> In this study we investigate a textural processing method of electroencephalography (EEG) signal as an indicator to estimate the driver&#39;s vigilance in a hypothetical Brain-Computer Interface (BCI) system.
The novelty of the solution proposed relies on employing the one-dimensional Local Binary Pattern (1D-LBP) algorithm for feature extraction from pre-processed EEG data. From the resulting feature&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.06412v2-abstract-full').style.display = 'inline'; document.getElementById('2010.06412v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.06412v2-abstract-full" style="display: none;"> In this study we investigate a textural processing method of electroencephalography (EEG) signal as an indicator to estimate the driver&#39;s vigilance in a hypothetical Brain-Computer Interface (BCI) system. The novelty of the solution proposed relies on employing the one-dimensional Local Binary Pattern (1D-LBP) algorithm for feature extraction from pre-processed EEG data. From the resulting feature vector, the classification is done according to three vigilance classes: awake, tired and drowsy. The claim is that the class transitions can be detected by describing the variations of the micro-patterns&#39; occurrences along the EEG signal. The 1D-LBP is able to describe them by detecting mutual variations of the signal temporarily &#34;close&#34; as a short bit-code. Our analysis allows to conclude that the 1D-LBP adoption has led to significant performance improvement. Moreover, capturing the class transitions from the EEG signal is effective, although the overall performance is not yet good enough to develop a BCI for assessing the driver&#39;s vigilance in real environments. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.06412v2-abstract-full').style.display = 'none'; document.getElementById('2010.06412v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for the 25th International Conference on Pattern Recognition (ICPR 2020)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.06407">arXiv:2010.06407</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2010.06407">pdf</a>, <a href="https://arxiv.org/format/2010.06407">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Detecting Anomalies from Video-Sequences: a Novel Descriptor </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Ghiani%2C+D">Davide Ghiani</a>, <a href="/search/cs?searchtype=author&amp;query=Pintor%2C+M">Maura Pintor</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a>, <a href="/search/cs?searchtype=author&amp;query=Roli%2C+F">Fabio Roli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short
has-text-grey-dark mathjax" id="2010.06407v2-abstract-short" style="display: inline;"> We present a novel descriptor for crowd behavior analysis and anomaly detection. The goal is to measure by appropriate patterns the speed of formation and disintegration of groups in the crowd. This descriptor is inspired by the concept of one-dimensional local binary patterns: in our case, such patterns depend on the number of group observed in a time window. An appropriate measurement unit, name&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.06407v2-abstract-full').style.display = 'inline'; document.getElementById('2010.06407v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.06407v2-abstract-full" style="display: none;"> We present a novel descriptor for crowd behavior analysis and anomaly detection. The goal is to measure by appropriate patterns the speed of formation and disintegration of groups in the crowd. This descriptor is inspired by the concept of one-dimensional local binary patterns: in our case, such patterns depend on the number of group observed in a time window. An appropriate measurement unit, named &#34;trit&#34; (trinary digit), represents three possible dynamic states of groups on a certain frame. Our hypothesis is that abrupt variations of the groups&#39; number may be due to an anomalous event that can be accordingly detected, by translating these variations on temporal trit-based sequence of strings which are significantly different from the one describing the &#34;no-anomaly&#34; one. Due to the peculiarity of the rationale behind this work, relying on the number of groups, three different methods of people group&#39;s extraction are compared. Experiments are carried out on the Motion-Emotion benchmark data set. 
Reported results point out in which cases the trit-based measurement of group dynamics allows us to detect the anomaly. Besides the promising performance of our approach, we show how it is correlated with the anomaly typology and the camera&#39;s perspective to the crowd&#39;s flow (frontal, lateral). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.06407v2-abstract-full').style.display = 'none'; document.getElementById('2010.06407v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for the 25th International Conference on Pattern Recognition (ICPR 2020)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.04072">arXiv:2010.04072</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2010.04072">pdf</a>, <a href="https://arxiv.org/format/2010.04072">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/IPAS50080.2020.9334946">10.1109/IPAS50080.2020.9334946 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Are Adaptive Face Recognition 
Systems still Necessary? Experiments on the APE Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Fierrez%2C+J">Julian Fierrez</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.04072v2-abstract-short" style="display: inline;"> In the last five years, deep learning methods, in particular CNN, have attracted considerable attention in the field of face-based recognition, achieving impressive results. Despite this progress, it is not yet clear precisely to what extent deep features are able to follow all the intra-class variations that the face can present over time. In this paper we investigate the performance the performa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.04072v2-abstract-full').style.display = 'inline'; document.getElementById('2010.04072v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.04072v2-abstract-full" style="display: none;"> In the last five years, deep learning methods, in particular CNN, have attracted considerable attention in the field of face-based recognition, achieving impressive results. Despite this progress, it is not yet clear precisely to what extent deep features are able to follow all the intra-class variations that the face can present over time. In this paper we investigate the performance the performance improvement of face recognition systems by adopting self updating strategies of the face templates.
For that purpose, we evaluate the performance of a well-known deep-learning face representation, namely, FaceNet, on a dataset that we generated explicitly conceived to embed intra-class variations of users on a large time span of captures: the APhotoEveryday (APE) dataset. Moreover, we compare these deep features with handcrafted features extracted using the BSIF algorithm. In both cases, we evaluate various template update strategies, in order to detect the most useful for such kind of features. Experimental results show the effectiveness of &#34;optimized&#34; self-update methods with respect to systems without update or random selection of templates. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.04072v2-abstract-full').style.display = 'none'; document.getElementById('2010.04072v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint version of a paper accepted at IPAS 2020 (Fourth IEEE International Conference on Image Processing, Applications and Systems)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> 2020 IEEE 4th International Conference on Image Processing, Applications and Systems (IPAS), Genova, Italy, 2020, pp. 
77-82 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.03397">arXiv:2007.03397</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2007.03397">pdf</a>, <a href="https://arxiv.org/format/2007.03397">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Are spoofs from latent fingerprints a real threat for the best state-of-art liveness detectors? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Angioni%2C+D">Daniele Angioni</a>, <a href="/search/cs?searchtype=author&amp;query=Feng%2C+X">Xiaoyi Feng</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a>, <a href="/search/cs?searchtype=author&amp;query=Roli%2C+F">Fabio Roli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.03397v2-abstract-short" style="display: inline;"> We investigated the threat level of realistic attacks using latent fingerprints against sensors equipped with state-of-art liveness detectors and fingerprint verification systems which integrate such liveness algorithms. To the best of our knowledge, only a previous investigation was done with spoofs from latent prints. In this paper, we focus on using snapshot pictures of latent fingerprints.
The&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.03397v2-abstract-full').style.display = 'inline'; document.getElementById('2007.03397v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.03397v2-abstract-full" style="display: none;"> We investigated the threat level of realistic attacks using latent fingerprints against sensors equipped with state-of-art liveness detectors and fingerprint verification systems which integrate such liveness algorithms. To the best of our knowledge, only a previous investigation was done with spoofs from latent prints. In this paper, we focus on using snapshot pictures of latent fingerprints. These pictures provide molds, that allows, after some digital processing, to fabricate high-quality spoofs. Taking a snapshot picture is much simpler than developing fingerprints left on a surface by magnetic powders and lifting the trace by a tape. What we are interested here is to evaluate preliminary at which extent attacks of the kind can be considered a real threat for state-of-art fingerprint liveness detectors and verification systems. To this aim, we collected a novel data set of live and spoof images fabricated with snapshot pictures of latent fingerprints. This data set provide a set of attacks at the most favourable conditions. We refer to this method and the related data set as &#34;ScreenSpoof&#34;. Then, we tested with it the performances of the best liveness detection algorithms, namely, the three winners of the LivDet competition. Reported results point out that the ScreenSpoof method is a threat of the same level, in terms of detection and verification errors, than that of attacks using spoofs fabricated with the full consensus of the victim. We think that this is a notable result, never reported in previous work. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.03397v2-abstract-full').style.display = 'none'; document.getElementById('2007.03397v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for the 25th International Conference on Pattern Recognition (ICPR 2020)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.12688">arXiv:1911.12688</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.12688">pdf</a>, <a href="https://arxiv.org/format/1911.12688">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.patcog.2019.107121">10.1016/j.patcog.2019.107121 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A novel classification-selection approach for the self updating of template-based face recognition systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a>, <a 
href="/search/cs?searchtype=author&amp;query=Roli%2C+F">Fabio Roli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.12688v1-abstract-short" style="display: inline;"> The boosting on the need of security notably increased the amount of possible facial recognition applications, especially due to the success of the Internet of Things (IoT) paradigm. However, although handcrafted and deep learning-inspired facial features reached a significant level of compactness and expressive power, the facial recognition performance still suffers from intra-class variations su&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.12688v1-abstract-full').style.display = 'inline'; document.getElementById('1911.12688v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.12688v1-abstract-full" style="display: none;"> The boosting on the need of security notably increased the amount of possible facial recognition applications, especially due to the success of the Internet of Things (IoT) paradigm. However, although handcrafted and deep learning-inspired facial features reached a significant level of compactness and expressive power, the facial recognition performance still suffers from intra-class variations such as ageing, facial expressions, lighting changes, and pose. These variations cannot be captured in a single acquisition and require multiple acquisitions of long duration, which are expensive and need a high level of collaboration from the users. Among others, self-update algorithms have been proposed in order to mitigate these problems. Self-updating aims to add novel templates to the users&#39; gallery among the inputs submitted during system operations. 
Consequently, computational complexity and storage space tend to be among the critical requirements of these algorithms. The present paper deals with the above problems by a novel template-based self-update algorithm, able to keep over time the expressive power of a limited set of templates stored in the system database. The rationale behind the proposed approach is in the working hypothesis that a dominating mode characterises the features&#39; distribution given the client. Therefore, the key point is to select the best templates around that mode. We propose two methods, which are tested on systems based on handcrafted features and deep-learning-inspired autoencoders at the state-of-the-art. Three benchmark data sets are used. Experimental results confirm that, by effective and compact feature sets which can support our working hypothesis, the proposed classification-selection approaches overcome the problem of manual updating and, in case, stringent computational requirements. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.12688v1-abstract-full').style.display = 'none'; document.getElementById('1911.12688v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This is an original manuscript of an article published by Elsevier in Pattern Recognition on 27 November 2019. 
Available online: https://doi.org/10.1016/j.patcog.2019.107121</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 00-01:99-00 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.08068">arXiv:1907.08068</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.08068">pdf</a>, <a href="https://arxiv.org/format/1907.08068">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-30754-7_6">10.1007/978-3-030-30754-7_6 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Analysis of &#34;User-Specific Effect&#34; and Impact of Operator Skills on Fingerprint PAD Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Tuveri%2C+P">Pierluigi Tuveri</a>, <a href="/search/cs?searchtype=author&amp;query=Ghiani%2C+L">Luca Ghiani</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.08068v1-abstract-short" style="display: inline;"> Fingerprint Liveness detection, or presentation attacks detection (PAD), that is, the ability of detecting if a fingerprint submitted to an electronic capture device is authentic or made up of some artificial materials, boosted 
the attention of the scientific community and recently machine learning approaches based on deep networks opened novel scenarios. A significant step ahead was due thanks to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08068v1-abstract-full').style.display = 'inline'; document.getElementById('1907.08068v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.08068v1-abstract-full" style="display: none;"> Fingerprint Liveness detection, or presentation attacks detection (PAD), that is, the ability of detecting if a fingerprint submitted to an electronic capture device is authentic or made up of some artificial materials, boosted the attention of the scientific community and recently machine learning approaches based on deep networks opened novel scenarios. A significant step ahead was due thanks to the public availability of large sets of data; in particular, the ones released during the International Fingerprint Liveness Detection Competition (LivDet). Among others, the fifth edition carried on in 2017, challenged the participants in two more challenges which were not detailed in the official report. In this paper, we want to extend that report by focusing on them: the first one was aimed at exploring the case in which the PAD is integrated into a fingerprint verification systems, where templates of users are available too and the designer is not constrained to refer only to a generic users population for the PAD settings. The second one faces with the exploitation ability of attackers of the provided fakes, and how this ability impacts on the final performance. These two challenges together may set at which extent the fingerprint presentation attacks are an actual threat and how to exploit additional information to make the PAD more effective. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08068v1-abstract-full').style.display = 'none'; document.getElementById('1907.08068v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint version of a paper accepted at BioFor 2019</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> New Trends in Image Analysis and Processing - ICIAP 2019. Lecture Notes in Computer Science, vol 11808. Springer, Cham </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1905.00639">arXiv:1905.00639</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1905.00639">pdf</a>, <a href="https://arxiv.org/format/1905.00639">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ICB45273.2019.8987281">10.1109/ICB45273.2019.8987281 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> LivDet in Action - Fingerprint Liveness Detection Competition 2019 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a 
href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Tuveri%2C+P">Pierluigi Tuveri</a>, <a href="/search/cs?searchtype=author&amp;query=Bazzoni%2C+C">Carlotta Bazzoni</a>, <a href="/search/cs?searchtype=author&amp;query=Dessalvi%2C+G">Giovanna Dessalvi</a>, <a href="/search/cs?searchtype=author&amp;query=Micheletto%2C+M">Marco Micheletto</a>, <a href="/search/cs?searchtype=author&amp;query=Ghiani%2C+L">Luca Ghiani</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1905.00639v1-abstract-short" style="display: inline;"> The International Fingerprint liveness Detection Competition (LivDet) is an open and well-acknowledged meeting point of academies and private companies that deal with the problem of distinguishing images coming from reproductions of fingerprints made of artificial materials and images relative to real fingerprints. In this edition of LivDet we invited the competitors to propose integrated algorith&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1905.00639v1-abstract-full').style.display = 'inline'; document.getElementById('1905.00639v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1905.00639v1-abstract-full" style="display: none;"> The International Fingerprint liveness Detection Competition (LivDet) is an open and well-acknowledged meeting point of academies and private companies that deal with the problem of distinguishing images coming from reproductions of fingerprints made of artificial materials and images relative to real fingerprints. In this edition of LivDet we invited the competitors to propose integrated algorithms with matching systems. 
The goal was to investigate at which extent this integration impact on the whole performance. Twelve algorithms were submitted to the competition, eight of which worked on integrated systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1905.00639v1-abstract-full').style.display = 'none'; document.getElementById('1905.00639v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint version of a paper accepted at ICB 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1803.05210">arXiv:1803.05210</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1803.05210">pdf</a>, <a href="https://arxiv.org/ps/1803.05210">ps</a>, <a href="https://arxiv.org/format/1803.05210">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LivDet 2017 Fingerprint Liveness Detection Competition 2017 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mura%2C+V">Valerio Mura</a>, <a href="/search/cs?searchtype=author&amp;query=Orr%C3%B9%2C+G">Giulia Orrù</a>, <a href="/search/cs?searchtype=author&amp;query=Casula%2C+R">Roberto Casula</a>, <a href="/search/cs?searchtype=author&amp;query=Sibiriu%2C+A">Alessandra Sibiriu</a>, <a href="/search/cs?searchtype=author&amp;query=Loi%2C+G">Giulia Loi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Tuveri%2C+P">Pierluigi Tuveri</a>, <a href="/search/cs?searchtype=author&amp;query=Ghiani%2C+L">Luca Ghiani</a>, <a href="/search/cs?searchtype=author&amp;query=Marcialis%2C+G+L">Gian Luca Marcialis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1803.05210v1-abstract-short" style="display: inline;"> Fingerprint Presentation Attack Detection (FPAD) deals with distinguishing images coming from artificial replicas of the fingerprint characteristic, made up of materials like silicone, gelatine or latex, and images coming from alive fingerprints. Images are captured by modern scanners, typically relying on solid-state or optical technologies. Since from 2009, the Fingerprint Liveness Detection Com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.05210v1-abstract-full').style.display = 'inline'; document.getElementById('1803.05210v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1803.05210v1-abstract-full" style="display: none;"> Fingerprint Presentation Attack Detection (FPAD) deals with distinguishing images coming from artificial replicas of the fingerprint characteristic, made up of materials like silicone, gelatine or latex, and images coming from alive fingerprints. Images are captured by modern scanners, typically relying on solid-state or optical technologies. Since from 2009, the Fingerprint Liveness Detection Competition (LivDet) aims to assess the performance of the state-of-the-art algorithms according to a rigorous experimental protocol and, at the same time, a simple overview of the basic achievements. The competition is open to all academics research centers and all companies that work in this field. 
The positive, increasing trend of the participants number, which supports the success of this initiative, is confirmed even this year: 17 algorithms were submitted to the competition, with a larger involvement of companies and academies. This means that the topic is relevant for both sides, and points out that a lot of work must be done in terms of fundamental and applied research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.05210v1-abstract-full').style.display = 'none'; document.getElementById('1803.05210v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 March, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">presented at ICB 2018</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 
15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg 
xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> 
</body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10