CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–35 of 35 results for author: <span class="mathjax">Grangetto, M</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Grangetto%2C+M">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Grangetto, M"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Grangetto%2C+M&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Grangetto, M"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.15611">arXiv:2411.15611</a> <span> [<a href="https://arxiv.org/pdf/2411.15611">pdf</a>, <a href="https://arxiv.org/format/2411.15611">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Knowledge Transfer Across Modalities with Natural Language Supervision </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Molinaro%2C+L">Luca Molinaro</a>, <a href="/search/cs?searchtype=author&query=Aiello%2C+E">Emanuele Aiello</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.15611v1-abstract-short" style="display: inline;"> We present a way to learn novel concepts by only using their textual description. We call this method Knowledge Transfer. Similarly to human perception, we leverage cross-modal interaction to introduce new concepts. We hypothesize that in a pre-trained visual encoder there are enough low-level features already learned (e.g. shape, appearance, color) that can be used to describe previously unknown… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15611v1-abstract-full').style.display = 'inline'; document.getElementById('2411.15611v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.15611v1-abstract-full" style="display: none;"> We present a way to learn novel concepts by only using their textual description. We call this method Knowledge Transfer. Similarly to human perception, we leverage cross-modal interaction to introduce new concepts. We hypothesize that in a pre-trained visual encoder there are enough low-level features already learned (e.g. shape, appearance, color) that can be used to describe previously unknown high-level concepts. Provided with a textual description of the novel concept, our method works by aligning the known low-level features of the visual encoder to its high-level textual description. We show that Knowledge Transfer can successfully introduce novel concepts in multimodal models, in a very efficient manner, by only requiring a single description of the target concept. Our approach is compatible with both separate textual and visual encoders (e.g. CLIP) and shared parameters across modalities. We also show that, following the same principle, Knowledge Transfer can improve concepts already known by the model. Leveraging Knowledge Transfer we improve zero-shot performance across different tasks such as classification, segmentation, image-text retrieval, and captioning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.15611v1-abstract-full').style.display = 'none'; document.getElementById('2411.15611v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages, 7 figures, 17 tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T45 (Primary) 68T50 (Secondary) <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.10185">arXiv:2411.10185</a> <span> [<a href="https://arxiv.org/pdf/2411.10185">pdf</a>, <a href="https://arxiv.org/format/2411.10185">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Efficient Progressive Image Compression with Variance-aware Masking </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Presta%2C+A">Alberto Presta</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Cosman%2C+P">Pamela Cosman</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.10185v2-abstract-short" style="display: inline;"> Learned progressive image compression is gaining momentum as it allows improved image reconstruction as more bits are decoded at the receiver. We propose a progressive image compression method in which an image is first represented as a pair of base-quality and top-quality latent representations. Next, a residual latent representation is encoded as the element-wise difference between the top and b… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10185v2-abstract-full').style.display = 'inline'; document.getElementById('2411.10185v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.10185v2-abstract-full" style="display: none;"> Learned progressive image compression is gaining momentum as it allows improved image reconstruction as more bits are decoded at the receiver. We propose a progressive image compression method in which an image is first represented as a pair of base-quality and top-quality latent representations. Next, a residual latent representation is encoded as the element-wise difference between the top and base representations. Our scheme enables progressive image compression with element-wise granularity by introducing a masking system that ranks each element of the residual latent representation from most to least important, dividing it into complementary components, which can be transmitted separately to the decoder in order to obtain different reconstruction quality. The masking system does not add further parameters nor complexity. At the receiver, any elements of the top latent representation excluded from the transmitted components can be independently replaced with the mean predicted by the hyperprior architecture, ensuring reliable reconstructions at any intermediate quality level. We also introduced Rate Enhancement Modules (REMs), which refine the estimation of entropy parameters using already decoded components. We obtain results competitive with state-of-the-art competitors, while significantly reducing computational complexity, decoding time, and number of parameters. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.10185v2-abstract-full').style.display = 'none'; document.getElementById('2411.10185v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages. Accepted at WACV 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.02981">arXiv:2410.02981</a> <span> [<a href="https://arxiv.org/pdf/2410.02981">pdf</a>, <a href="https://arxiv.org/format/2410.02981">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> GABIC: Graph-based Attention Block for Image Compression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spadaro%2C+G">Gabriele Spadaro</a>, <a href="/search/cs?searchtype=author&query=Presta%2C+A">Alberto Presta</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Giraldo%2C+J+H">Jhony H. Giraldo</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.02981v1-abstract-short" style="display: inline;"> While standardized codecs like JPEG and HEVC-intra represent the industry standard in image compression, neural Learned Image Compression (LIC) codecs represent a promising alternative. In detail, integrating attention mechanisms from Vision Transformers into LIC models has shown improved compression efficiency. However, extra efficiency often comes at the cost of aggregating redundant features. T… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02981v1-abstract-full').style.display = 'inline'; document.getElementById('2410.02981v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.02981v1-abstract-full" style="display: none;"> While standardized codecs like JPEG and HEVC-intra represent the industry standard in image compression, neural Learned Image Compression (LIC) codecs represent a promising alternative. In detail, integrating attention mechanisms from Vision Transformers into LIC models has shown improved compression efficiency. However, extra efficiency often comes at the cost of aggregating redundant features. This work proposes a Graph-based Attention Block for Image Compression (GABIC), a method to reduce feature redundancy based on a k-Nearest Neighbors enhanced attention mechanism. Our experiments show that GABIC outperforms comparable methods, particularly at high bit rates, enhancing compression performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.02981v1-abstract-full').style.display = 'none'; document.getElementById('2410.02981v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 5 figures, accepted at ICIP 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00807">arXiv:2410.00807</a> <span> [<a href="https://arxiv.org/pdf/2410.00807">pdf</a>, <a href="https://arxiv.org/format/2410.00807">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> WiGNet: Windowed Vision Graph Neural Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Spadaro%2C+G">Gabriele Spadaro</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Giraldo%2C+J+H">Jhony H. Giraldo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00807v1-abstract-short" style="display: inline;"> In recent years, Graph Neural Networks (GNNs) have demonstrated strong adaptability to various real-world challenges, with architectures such as Vision GNN (ViG) achieving state-of-the-art performance in several computer vision tasks. However, their practical applicability is hindered by the computational complexity of constructing the graph, which scales quadratically with the image size. In this… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00807v1-abstract-full').style.display = 'inline'; document.getElementById('2410.00807v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00807v1-abstract-full" style="display: none;"> In recent years, Graph Neural Networks (GNNs) have demonstrated strong adaptability to various real-world challenges, with architectures such as Vision GNN (ViG) achieving state-of-the-art performance in several computer vision tasks. However, their practical applicability is hindered by the computational complexity of constructing the graph, which scales quadratically with the image size. In this paper, we introduce a novel Windowed vision Graph neural Network (WiGNet) model for efficient image processing. WiGNet explores a different strategy from previous works by partitioning the image into windows and constructing a graph within each window. Therefore, our model uses graph convolutions instead of the typical 2D convolution or self-attention mechanism. WiGNet effectively manages computational and memory complexity for large image sizes. We evaluate our method in the ImageNet-1k benchmark dataset and test the adaptability of WiGNet using the CelebA-HQ dataset as a downstream task with higher-resolution images. In both of these scenarios, our method achieves competitive results compared to previous vision GNNs while keeping memory and computational complexity at bay. WiGNet offers a promising solution toward the deployment of vision GNNs in real-world applications. We publicly released the code at https://github.com/EIDOSLAB/WiGNet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00807v1-abstract-full').style.display = 'none'; document.getElementById('2410.00807v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00557">arXiv:2410.00557</a> <span> [<a href="https://arxiv.org/pdf/2410.00557">pdf</a>, <a href="https://arxiv.org/format/2410.00557">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> STanH : Parametric Quantization for Variable Rate Learned Image Compression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Presta%2C+A">Alberto Presta</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00557v2-abstract-short" style="display: inline;"> In end-to-end learned image compression, encoder and decoder are jointly trained to minimize a $R + 位D$ cost function, where $位$ controls the trade-off between rate of the quantized latent representation and image quality. Unfortunately, a distinct encoder-decoder pair with millions of parameters must be trained for each $位$, hence the need to switch encoders and to store multiple encoders and dec… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00557v2-abstract-full').style.display = 'inline'; document.getElementById('2410.00557v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00557v2-abstract-full" style="display: none;"> In end-to-end learned image compression, encoder and decoder are jointly trained to minimize a $R + 位D$ cost function, where $位$ controls the trade-off between rate of the quantized latent representation and image quality. Unfortunately, a distinct encoder-decoder pair with millions of parameters must be trained for each $位$, hence the need to switch encoders and to store multiple encoders and decoders on the user device for every target rate. This paper proposes to exploit a differentiable quantizer designed around a parametric sum of hyperbolic tangents, called STanH , that relaxes the step-wise quantization function. STanH is implemented as a differentiable activation layer with learnable quantization parameters that can be plugged into a pre-trained fixed rate model and refined to achieve different target bitrates. Experimental results show that our method enables variable rate coding with comparable efficiency to the state-of-the-art, yet with significant savings in terms of ease of deployment, training time, and storage costs <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00557v2-abstract-full').style.display = 'none'; document.getElementById('2410.00557v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to IEEE Transactions on Image Processing</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.07079">arXiv:2408.07079</a> <span> [<a href="https://arxiv.org/pdf/2408.07079">pdf</a>, <a href="https://arxiv.org/format/2408.07079">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Anatomical Foundation Models for Brain MRIs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Brunello%2C+M">Matteo Brunello</a>, <a href="/search/cs?searchtype=author&query=Dufumier%2C+B">Benoit Dufumier</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.07079v3-abstract-short" style="display: inline;"> Deep Learning (DL) in neuroimaging has become increasingly relevant for detecting neurological conditions and neurodegenerative disorders. One of the most predominant biomarkers in neuroimaging is represented by brain age, which has been shown to be a good indicator for different conditions, such as Alzheimer's Disease. Using brain age for weakly supervised pre-training of DL models in transfer le… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07079v3-abstract-full').style.display = 'inline'; document.getElementById('2408.07079v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.07079v3-abstract-full" style="display: none;"> Deep Learning (DL) in neuroimaging has become increasingly relevant for detecting neurological conditions and neurodegenerative disorders. One of the most predominant biomarkers in neuroimaging is represented by brain age, which has been shown to be a good indicator for different conditions, such as Alzheimer's Disease. Using brain age for weakly supervised pre-training of DL models in transfer learning settings has also recently shown promising results, especially when dealing with data scarcity of different conditions. On the other hand, anatomical information of brain MRIs (e.g. cortical thickness) can provide important information for learning good representations that can be transferred to many downstream tasks. In this work, we propose AnatCL, an anatomical foundation model for brain MRIs that i.) leverages anatomical information in a weakly contrastive learning approach, and ii.) achieves state-of-the-art performances across many different downstream tasks. To validate our approach we consider 12 different downstream tasks for the diagnosis of different conditions such as Alzheimer's Disease, autism spectrum disorder, and schizophrenia. Furthermore, we also target the prediction of 10 different clinical assessment scores using structural MRI data. Our findings show that incorporating anatomical information during pre-training leads to more robust and generalizable representations. Pre-trained models can be found at: https://github.com/EIDOSLAB/AnatCL. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07079v3-abstract-full').style.display = 'none'; document.getElementById('2408.07079v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Updated version; added ablation study</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T07 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.10389">arXiv:2407.10389</a> <span> [<a href="https://arxiv.org/pdf/2407.10389">pdf</a>, <a href="https://arxiv.org/format/2407.10389">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Boost Your NeRF: A Model-Agnostic Mixture of Experts Framework for High Quality and Efficient Rendering </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Di+Sario%2C+F">Francesco Di Sario</a>, <a href="/search/cs?searchtype=author&query=Renzulli%2C+R">Riccardo Renzulli</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.10389v3-abstract-short" style="display: inline;"> Since the introduction of NeRFs, considerable attention has been focused on improving their training and inference times, leading to the development of Fast-NeRFs models. Despite demonstrating impressive rendering speed and quality, the rapid convergence of such models poses challenges for further improving reconstruction quality. Common strategies to improve rendering quality involves augmenting… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.10389v3-abstract-full').style.display = 'inline'; document.getElementById('2407.10389v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.10389v3-abstract-full" style="display: none;"> Since the introduction of NeRFs, considerable attention has been focused on improving their training and inference times, leading to the development of Fast-NeRFs models. Despite demonstrating impressive rendering speed and quality, the rapid convergence of such models poses challenges for further improving reconstruction quality. Common strategies to improve rendering quality involves augmenting model parameters or increasing the number of sampled points. However, these computationally intensive approaches encounter limitations in achieving significant quality enhancements. This study introduces a model-agnostic framework inspired by Sparsely-Gated Mixture of Experts to enhance rendering quality without escalating computational complexity. Our approach enables specialization in rendering different scene components by employing a mixture of experts with varying resolutions. We present a novel gate formulation designed to maximize expert capabilities and propose a resolution-based routing technique to effectively induce sparsity and decompose scenes. Our work significantly improves reconstruction quality while maintaining competitive performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.10389v3-abstract-full').style.display = 'none'; document.getElementById('2407.10389v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The paper has been accepted to the ECCV 2024 conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.02077">arXiv:2406.02077</a> <span> [<a href="https://arxiv.org/pdf/2406.02077">pdf</a>, <a href="https://arxiv.org/format/2406.02077">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Multi-target stain normalization for histology slides </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ivanov%2C+D">Desislav Ivanov</a>, <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.02077v3-abstract-short" style="display: inline;"> Traditional staining normalization approaches, e.g. Macenko, typically rely on the choice of a single representative reference image, which may not adequately account for the diverse staining patterns of datasets collected in practical scenarios. In this study, we introduce a novel approach that leverages multiple reference images to enhance robustness against stain variation. Our method is parame… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.02077v3-abstract-full').style.display = 'inline'; document.getElementById('2406.02077v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.02077v3-abstract-full" style="display: none;"> Traditional staining normalization approaches, e.g. Macenko, typically rely on the choice of a single representative reference image, which may not adequately account for the diverse staining patterns of datasets collected in practical scenarios. In this study, we introduce a novel approach that leverages multiple reference images to enhance robustness against stain variation. Our method is parameter-free and can be adopted in existing computational pathology pipelines with no significant changes. We evaluate the effectiveness of our method through experiments using a deep-learning pipeline for automatic nuclei segmentation on colorectal images. Our results show that by leveraging multiple reference images, better results can be achieved when generalizing to external data, where the staining can widely differ from the training set. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.02077v3-abstract-full').style.display = 'none'; document.getElementById('2406.02077v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68U10 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.4.0 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.00772">arXiv:2406.00772</a> <span> [<a href="https://arxiv.org/pdf/2406.00772">pdf</a>, <a href="https://arxiv.org/format/2406.00772">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Unsupervised Contrastive Analysis for Salient Pattern Detection using Conditional Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Patr%C3%ADcio%2C+C">Cristiano Patr铆cio</a>, <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Renzulli%2C+R">Riccardo Renzulli</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Teixeira%2C+L+F">Luis F. Teixeira</a>, <a href="/search/cs?searchtype=author&query=Neves%2C+J+C">Jo茫o C. Neves</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.00772v2-abstract-short" style="display: inline;"> Contrastive Analysis (CA) regards the problem of identifying patterns in images that allow distinguishing between a background (BG) dataset (i.e. healthy subjects) and a target (TG) dataset (i.e. unhealthy subjects). Recent works on this topic rely on variational autoencoders (VAE) or contrastive learning strategies to learn the patterns that separate TG samples from BG samples in a supervised man… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.00772v2-abstract-full').style.display = 'inline'; document.getElementById('2406.00772v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.00772v2-abstract-full" style="display: none;"> Contrastive Analysis (CA) regards the problem of identifying patterns in images that allow distinguishing between a background (BG) dataset (i.e. healthy subjects) and a target (TG) dataset (i.e. unhealthy subjects). Recent works on this topic rely on variational autoencoders (VAE) or contrastive learning strategies to learn the patterns that separate TG samples from BG samples in a supervised manner. However, the dependency on target (unhealthy) samples can be challenging in medical scenarios due to their limited availability. Also, the blurred reconstructions of VAEs lack utility and interpretability. In this work, we redefine the CA task by employing a self-supervised contrastive encoder to learn a latent representation encoding only common patterns from input images, using samples exclusively from the BG dataset during training, and approximating the distribution of the target patterns by leveraging data augmentation techniques. Subsequently, we exploit state-of-the-art generative methods, i.e. diffusion models, conditioned on the learned latent representation to produce a realistic (healthy) version of the input image encoding solely the common patterns. Thorough validation on a facial image dataset and experiments across three brain MRI datasets demonstrate that conditioning the generative process of state-of-the-art generative methods with the latent representation from our self-supervised contrastive encoder yields improvements in the generated image quality and in the accuracy of image classification. The code is available at https://github.com/CristianoPatricio/unsupervised-contrastive-cond-diff. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.00772v2-abstract-full').style.display = 'none'; document.getElementById('2406.00772v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 11 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.11598">arXiv:2405.11598</a> <span> [<a href="https://arxiv.org/pdf/2405.11598">pdf</a>, <a href="https://arxiv.org/format/2405.11598">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> AI-Assisted Diagnosis for Covid-19 CXR Screening: From Data Collection to Clinical Validation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Renzulli%2C+R">Riccardo Renzulli</a>, <a href="/search/cs?searchtype=author&query=Grosso%2C+M">Marco Grosso</a>, <a href="/search/cs?searchtype=author&query=Basile%2C+D">Domenico Basile</a>, <a href="/search/cs?searchtype=author&query=Busso%2C+M">Marco Busso</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.11598v1-abstract-short" style="display: inline;"> In this paper, we present the major results from the Covid Radiographic imaging System based on AI (Co.R.S.A.) project, which took place in Italy. This project aims to develop a state-of-the-art AI-based system for diagnosing Covid-19 pneumonia from Chest X-ray (CXR) images. The contributions of this work are manyfold: the release of the public CORDA dataset, a deep learning pipeline for Covid-19… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.11598v1-abstract-full').style.display = 'inline'; document.getElementById('2405.11598v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.11598v1-abstract-full" style="display: none;"> In this paper, we present the major results from the Covid Radiographic imaging System based on AI (Co.R.S.A.) project, which took place in Italy. This project aims to develop a state-of-the-art AI-based system for diagnosing Covid-19 pneumonia from Chest X-ray (CXR) images. The contributions of this work are manyfold: the release of the public CORDA dataset, a deep learning pipeline for Covid-19 detection, and the clinical validation of the developed solution by expert radiologists. The proposed detection model is based on a two-step approach that, paired with state-of-the-art debiasing, provides reliable results. Most importantly, our investigation includes the actual usage of the diagnosis aid tool by radiologists, allowing us to assess the real benefits in terms of accuracy and time efficiency. Project homepage: https://corsa.di.unito.it/ <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.11598v1-abstract-full').style.display = 'none'; document.getElementById('2405.11598v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at 21st IEEE International Symposium on Biomedical Imaging (ISBI)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T07 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.1; I.4.0 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.15591">arXiv:2404.15591</a> <span> [<a href="https://arxiv.org/pdf/2404.15591">pdf</a>, <a href="https://arxiv.org/format/2404.15591">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Domain Adaptation for Learned Image Compression with Supervised Adapters </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Presta%2C+A">Alberto Presta</a>, <a href="/search/cs?searchtype=author&query=Spadaro%2C+G">Gabriele Spadaro</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.15591v1-abstract-short" style="display: inline;"> In Learned Image Compression (LIC), a model is trained at encoding and decoding images sampled from a source domain, often outperforming traditional codecs on natural images; yet its performance may be far from optimal on images sampled from different domains. In this work, we tackle the problem of adapting a pre-trained model to multiple target domains by plugging into the decoder an adapter modu… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15591v1-abstract-full').style.display = 'inline'; document.getElementById('2404.15591v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.15591v1-abstract-full" style="display: none;"> In Learned Image Compression (LIC), a model is trained at encoding and decoding images sampled from a source domain, often outperforming traditional codecs on natural images; yet its performance may be far from optimal on images sampled from different domains. In this work, we tackle the problem of adapting a pre-trained model to multiple target domains by plugging into the decoder an adapter module for each of them, including the source one. Each adapter improves the decoder performance on a specific domain, without the model forgetting about the images seen at training time. A gate network computes the weights to optimally blend the contributions from the adapters when the bitstream is decoded. We experimentally validate our method over two state-of-the-art pre-trained models, observing improved rate-distortion efficiency on the target domains without penalties on the source domain. Furthermore, the gate's ability to find similarities with the learned target domains enables better encoding efficiency also for images outside them. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.15591v1-abstract-full').style.display = 'none'; document.getElementById('2404.15591v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, published to Data compression conference 2024 (DCC2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.18756">arXiv:2403.18756</a> <span> [<a href="https://arxiv.org/pdf/2403.18756">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Detection of subclinical atherosclerosis by image-based deep learning on chest x-ray </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gallone%2C+G">Guglielmo Gallone</a>, <a href="/search/cs?searchtype=author&query=Iodice%2C+F">Francesco Iodice</a>, <a href="/search/cs?searchtype=author&query=Presta%2C+A">Alberto Presta</a>, <a href="/search/cs?searchtype=author&query=Tore%2C+D">Davide Tore</a>, <a href="/search/cs?searchtype=author&query=de+Filippo%2C+O">Ovidio de Filippo</a>, <a href="/search/cs?searchtype=author&query=Visciano%2C+M">Michele Visciano</a>, <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Serafini%2C+A">Alessandro Serafini</a>, <a href="/search/cs?searchtype=author&query=Gorrini%2C+P">Paola Gorrini</a>, <a href="/search/cs?searchtype=author&query=Bruno%2C+A">Alessandro Bruno</a>, <a href="/search/cs?searchtype=author&query=Marra%2C+W+G">Walter Grosso Marra</a>, <a href="/search/cs?searchtype=author&query=Hughes%2C+J">James Hughes</a>, <a href="/search/cs?searchtype=author&query=Iannaccone%2C+M">Mario Iannaccone</a>, <a href="/search/cs?searchtype=author&query=Fonio%2C+P">Paolo Fonio</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Depaoli%2C+A">Alessandro Depaoli</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=de+Ferrari%2C+G+M">Gaetano Maria de Ferrari</a>, <a href="/search/cs?searchtype=author&query=D%27Ascenzo%2C+F">Fabrizio D'Ascenzo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.18756v1-abstract-short" style="display: inline;"> Aims. To develop a deep-learning based system for recognition of subclinical atherosclerosis on a plain frontal chest x-ray. Methods and Results. A deep-learning algorithm to predict coronary artery calcium (CAC) score (the AI-CAC model) was developed on 460 chest x-ray (80% training cohort, 20% internal validation cohort) of primary prevention patients (58.4% male, median age 63 [51-74] years) wi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.18756v1-abstract-full').style.display = 'inline'; document.getElementById('2403.18756v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.18756v1-abstract-full" style="display: none;"> Aims. To develop a deep-learning based system for recognition of subclinical atherosclerosis on a plain frontal chest x-ray. Methods and Results. A deep-learning algorithm to predict coronary artery calcium (CAC) score (the AI-CAC model) was developed on 460 chest x-ray (80% training cohort, 20% internal validation cohort) of primary prevention patients (58.4% male, median age 63 [51-74] years) with available paired chest x-ray and chest computed tomography (CT) indicated for any clinical reason and performed within 3 months. The CAC score calculated on chest CT was used as ground truth. The model was validated on an temporally-independent cohort of 90 patients from the same institution (external validation). The diagnostic accuracy of the AI-CAC model assessed by the area under the curve (AUC) was the primary outcome. Overall, median AI-CAC score was 35 (0-388) and 28.9% patients had no AI-CAC. AUC of the AI-CAC model to identify a CAC>0 was 0.90 in the internal validation cohort and 0.77 in the external validation cohort. Sensitivity was consistently above 92% in both cohorts. In the overall cohort (n=540), among patients with AI-CAC=0, a single ASCVD event occurred, after 4.3 years. Patients with AI-CAC>0 had significantly higher Kaplan Meier estimates for ASCVD events (13.5% vs. 3.4%, log-rank=0.013). Conclusion. The AI-CAC model seems to accurately detect subclinical atherosclerosis on chest x-ray with elevated sensitivity, and to predict ASCVD events with elevated negative predictive value. Adoption of the AI-CAC model to refine CV risk stratification or as an opportunistic screening tool requires prospective evaluation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.18756v1-abstract-full').style.display = 'none'; document.getElementById('2403.18756v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to European Heart Journal - Cardiovascular Imaging Added also the additional material 44 pages (30 main paper, 14 additional material), 14 figures (5 main manuscript, 9 additional material)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.08326">arXiv:2211.08326</a> <span> [<a href="https://arxiv.org/pdf/2211.08326">pdf</a>, <a href="https://arxiv.org/format/2211.08326">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Contrastive learning for regression in multi-site brain age prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Dufumier%2C+B">Benoit Dufumier</a>, <a href="/search/cs?searchtype=author&query=Duchesnay%2C+E">Edouard Duchesnay</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Gori%2C+P">Pietro Gori</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.08326v2-abstract-short" style="display: inline;"> Building accurate Deep Learning (DL) models for brain age prediction is a very relevant topic in neuroimaging, as it could help better understand neurodegenerative disorders and find new biomarkers. To estimate accurate and generalizable models, large datasets have been collected, which are often multi-site and multi-scanner. This large heterogeneity negatively affects the generalization performan… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.08326v2-abstract-full').style.display = 'inline'; document.getElementById('2211.08326v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.08326v2-abstract-full" style="display: none;"> Building accurate Deep Learning (DL) models for brain age prediction is a very relevant topic in neuroimaging, as it could help better understand neurodegenerative disorders and find new biomarkers. To estimate accurate and generalizable models, large datasets have been collected, which are often multi-site and multi-scanner. This large heterogeneity negatively affects the generalization performance of DL models since they are prone to overfit site-related noise. Recently, contrastive learning approaches have been shown to be more robust against noise in data or labels. For this reason, we propose a novel contrastive learning regression loss for robust brain age prediction using MRI scans. Our method achieves state-of-the-art performance on the OpenBHB challenge, yielding the best generalization capability and robustness to site-related noise. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.08326v2-abstract-full').style.display = 'none'; document.getElementById('2211.08326v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.05568">arXiv:2211.05568</a> <span> [<a href="https://arxiv.org/pdf/2211.05568">pdf</a>, <a href="https://arxiv.org/format/2211.05568">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Unbiased Supervised Contrastive Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Dufumier%2C+B">Benoit Dufumier</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Gori%2C+P">Pietro Gori</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.05568v4-abstract-short" style="display: inline;"> Many datasets are biased, namely they contain easy-to-learn features that are highly correlated with the target class only in the dataset but not in the true underlying distribution of the data. For this reason, learning unbiased models from biased data has become a very relevant research topic in the last years. In this work, we tackle the problem of learning representations that are robust to bi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05568v4-abstract-full').style.display = 'inline'; document.getElementById('2211.05568v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.05568v4-abstract-full" style="display: none;"> Many datasets are biased, namely they contain easy-to-learn features that are highly correlated with the target class only in the dataset but not in the true underlying distribution of the data. For this reason, learning unbiased models from biased data has become a very relevant research topic in the last years. In this work, we tackle the problem of learning representations that are robust to biases. We first present a margin-based theoretical framework that allows us to clarify why recent contrastive losses (InfoNCE, SupCon, etc.) can fail when dealing with biased data. Based on that, we derive a novel formulation of the supervised contrastive loss (epsilon-SupInfoNCE), providing more accurate control of the minimal distance between positive and negative samples. Furthermore, thanks to our theoretical framework, we also propose FairKL, a new debiasing regularization loss, that works well even with extremely biased data. We validate the proposed losses on standard vision datasets including CIFAR10, CIFAR100, and ImageNet, and we assess the debiasing capability of FairKL with epsilon-SupInfoNCE, reaching state-of-the-art performance on a number of biased datasets, including real instances of biases in the wild. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05568v4-abstract-full').style.display = 'none'; document.getElementById('2211.05568v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at ICLR 2023 (v3); Fix typo in Eq.19 (v4)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.09203">arXiv:2208.09203</a> <span> [<a href="https://arxiv.org/pdf/2208.09203">pdf</a>, <a href="https://arxiv.org/format/2208.09203">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Towards Efficient Capsule Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Renzulli%2C+R">Riccardo Renzulli</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.09203v1-abstract-short" style="display: inline;"> From the moment Neural Networks dominated the scene for image processing, the computational complexity needed to solve the targeted tasks skyrocketed: against such an unsustainable trend, many strategies have been developed, ambitiously targeting performance's preservation. Promoting sparse topologies, for example, allows the deployment of deep neural networks models on embedded, resource-constrai… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.09203v1-abstract-full').style.display = 'inline'; document.getElementById('2208.09203v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.09203v1-abstract-full" style="display: none;"> From the moment Neural Networks dominated the scene for image processing, the computational complexity needed to solve the targeted tasks skyrocketed: against such an unsustainable trend, many strategies have been developed, ambitiously targeting performance's preservation. Promoting sparse topologies, for example, allows the deployment of deep neural networks models on embedded, resource-constrained devices. Recently, Capsule Networks were introduced to enhance explainability of a model, where each capsule is an explicit representation of an object or its parts. These models show promising results on toy datasets, but their low scalability prevents deployment on more complex tasks. In this work, we explore sparsity besides capsule representations to improve their computational efficiency by reducing the number of capsules. We show how pruning with Capsule Network achieves high generalization with less memory requirements, computational effort, and inference and training time. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.09203v1-abstract-full').style.display = 'none'; document.getElementById('2208.09203v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at ICIP 2022 Special Session SCENA: Simplification, Compression and Efficiency with Neural networks and Artificial intelligence</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.09455">arXiv:2207.09455</a> <span> [<a href="https://arxiv.org/pdf/2207.09455">pdf</a>, <a href="https://arxiv.org/format/2207.09455">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> To update or not to update? Neurons at equilibrium in deep models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Bragagnolo%2C+A">Andrea Bragagnolo</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.09455v3-abstract-short" style="display: inline;"> Recent advances in deep learning optimization showed that, with some a-posteriori information on fully-trained models, it is possible to match the same performance by simply training a subset of their parameters. Such a discovery has a broad impact from theory to applications, driving the research towards methods to identify the minimum subset of parameters to train without look-ahead information… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.09455v3-abstract-full').style.display = 'inline'; document.getElementById('2207.09455v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.09455v3-abstract-full" style="display: none;"> Recent advances in deep learning optimization showed that, with some a-posteriori information on fully-trained models, it is possible to match the same performance by simply training a subset of their parameters. Such a discovery has a broad impact from theory to applications, driving the research towards methods to identify the minimum subset of parameters to train without look-ahead information exploitation. However, the methods proposed do not match the state-of-the-art performance, and rely on unstructured sparsely connected models. In this work we shift our focus from the single parameters to the behavior of the whole neuron, exploiting the concept of neuronal equilibrium (NEq). When a neuron is in a configuration at equilibrium (meaning that it has learned a specific input-output relationship), we can halt its update; on the contrary, when a neuron is at non-equilibrium, we let its state evolve towards an equilibrium state, updating its parameters. The proposed approach has been tested on different state-of-the-art learning strategies and tasks, validating NEq and observing that the neuronal equilibrium depends on the specific learning setup. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.09455v3-abstract-full').style.display = 'none'; document.getElementById('2207.09455v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.02000">arXiv:2207.02000</a> <span> [<a href="https://arxiv.org/pdf/2207.02000">pdf</a>, <a href="https://arxiv.org/format/2207.02000">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.neucom.2023.126612">10.1016/j.neucom.2023.126612 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Disentangling private classes through regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Gennari%2C+F">Francesca Gennari</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.02000v1-abstract-short" style="display: inline;"> Deep learning models are nowadays broadly deployed to solve an incredibly large variety of tasks. However, little attention has been devoted to connected legal aspects. In 2016, the European Union approved the General Data Protection Regulation which entered into force in 2018. Its main rationale was to protect the privacy and data protection of its citizens by the way of operating of the so-calle… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.02000v1-abstract-full').style.display = 'inline'; document.getElementById('2207.02000v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.02000v1-abstract-full" style="display: none;"> Deep learning models are nowadays broadly deployed to solve an incredibly large variety of tasks. However, little attention has been devoted to connected legal aspects. In 2016, the European Union approved the General Data Protection Regulation which entered into force in 2018. Its main rationale was to protect the privacy and data protection of its citizens by the way of operating of the so-called "Data Economy". As data is the fuel of modern Artificial Intelligence, it is argued that the GDPR can be partly applicable to a series of algorithmic decision making tasks before a more structured AI Regulation enters into force. In the meantime, AI should not allow undesired information leakage deviating from the purpose for which is created. In this work we propose DisP, an approach for deep learning models disentangling the information related to some classes we desire to keep private, from the data processed by AI. In particular, DisP is a regularization strategy de-correlating the features belonging to the same private class at training time, hiding the information of private classes membership. Our experiments on state-of-the-art deep learning models show the effectiveness of DisP, minimizing the risk of extraction for the classes we desire to keep private. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.02000v1-abstract-full').style.display = 'none'; document.getElementById('2207.02000v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.12941">arXiv:2204.12941</a> <span> [<a href="https://arxiv.org/pdf/2204.12941">pdf</a>, <a href="https://arxiv.org/format/2204.12941">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TAI.2024.3514554">10.1109/TAI.2024.3514554 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Unsupervised Learning of Unbiased Visual Representations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.12941v2-abstract-short" style="display: inline;"> Deep neural networks often struggle to learn robust representations in the presence of dataset biases, leading to suboptimal generalization on unbiased datasets. This limitation arises because the models heavily depend on peripheral and confounding factors, inadvertently acquired during training. Existing approaches to address this problem typically involve explicit supervision of bias attributes… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.12941v2-abstract-full').style.display = 'inline'; document.getElementById('2204.12941v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.12941v2-abstract-full" style="display: none;"> Deep neural networks often struggle to learn robust representations in the presence of dataset biases, leading to suboptimal generalization on unbiased datasets. This limitation arises because the models heavily depend on peripheral and confounding factors, inadvertently acquired during training. Existing approaches to address this problem typically involve explicit supervision of bias attributes or reliance on prior knowledge about the biases. In this study, we address the challenging scenario where no explicit annotations of bias are available, and there's no prior knowledge about its nature. We present a fully unsupervised debiasing framework with three key steps: firstly, leveraging the inherent tendency to learn malignant biases to acquire a bias-capturing model; next, employing a pseudo-labeling process to obtain bias labels; and finally, applying cutting-edge supervised debiasing techniques to achieve an unbiased model. Additionally, we introduce a theoretical framework for evaluating model biasedness and conduct a detailed analysis of how biases impact neural network training. Experimental results on both synthetic and real-world datasets demonstrate the effectiveness of our method, showcasing state-of-the-art performance in various settings, occasionally surpassing fully supervised debiasing approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.12941v2-abstract-full').style.display = 'none'; document.getElementById('2204.12941v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at IEEE Transactions on Artificial Intelligence (TAI)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 68T07 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.01298">arXiv:2204.01298</a> <span> [<a href="https://arxiv.org/pdf/2204.01298">pdf</a>, <a href="https://arxiv.org/format/2204.01298">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> REM: Routing Entropy Minimization for Capsule Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Renzulli%2C+R">Riccardo Renzulli</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.01298v1-abstract-short" style="display: inline;"> Capsule Networks ambition is to build an explainable and biologically-inspired neural network model. One of their main innovations relies on the routing mechanism which extracts a parse tree: its main purpose is to explicitly build relationships between capsules. However, their true potential in terms of explainability has not surfaced yet: these relationships are extremely heterogeneous and diffi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.01298v1-abstract-full').style.display = 'inline'; document.getElementById('2204.01298v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.01298v1-abstract-full" style="display: none;"> Capsule Networks ambition is to build an explainable and biologically-inspired neural network model. One of their main innovations relies on the routing mechanism which extracts a parse tree: its main purpose is to explicitly build relationships between capsules. However, their true potential in terms of explainability has not surfaced yet: these relationships are extremely heterogeneous and difficult to understand. This paper proposes REM, a technique which minimizes the entropy of the parse tree-like structure, improving its explainability. We accomplish this by driving the model parameters distribution towards low entropy configurations, using a pruning mechanism as a proxy. We also generate static parse trees with no performance loss, showing that, with REM, Capsule Networks build stronger relationships between capsules. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.01298v1-abstract-full').style.display = 'none'; document.getElementById('2204.01298v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.05298">arXiv:2107.05298</a> <span> [<a href="https://arxiv.org/pdf/2107.05298">pdf</a>, <a href="https://arxiv.org/ps/2107.05298">ps</a>, <a href="https://arxiv.org/format/2107.05298">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.neucom.2021.07.022">10.1016/j.neucom.2021.07.022 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> HEMP: High-order Entropy Minimization for neural network comPression </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Lathuili%C3%A8re%2C+S">St茅phane Lathuili猫re</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Cagnazzo%2C+M">Marco Cagnazzo</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.05298v1-abstract-short" style="display: inline;"> We formulate the entropy of a quantized artificial neural network as a differentiable function that can be plugged as a regularization term into the cost function minimized by gradient descent. Our formulation scales efficiently beyond the first order and is agnostic of the quantization scheme. The network can then be trained to minimize the entropy of the quantized parameters, so that they can be… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.05298v1-abstract-full').style.display = 'inline'; document.getElementById('2107.05298v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.05298v1-abstract-full" style="display: none;"> We formulate the entropy of a quantized artificial neural network as a differentiable function that can be plugged as a regularization term into the cost function minimized by gradient descent. Our formulation scales efficiently beyond the first order and is agnostic of the quantization scheme. The network can then be trained to minimize the entropy of the quantized parameters, so that they can be optimally compressed via entropy coding. We experiment with our entropy formulation at quantizing and compressing well-known network architectures over multiple datasets. Our approach compares favorably over similar methods, enjoying the benefits of higher order entropy estimate, showing flexibility towards non-uniform quantization (we use Lloyd-max quantization), scalability towards any entropy order to be minimized and efficiency in terms of compression. We show that HEMP is able to work in synergy with other approaches aiming at pruning or quantizing the model itself, delivering significant benefits in terms of storage size compressibility without harming the model's performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.05298v1-abstract-full').style.display = 'none'; document.getElementById('2107.05298v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.02023">arXiv:2103.02023</a> <span> [<a href="https://arxiv.org/pdf/2103.02023">pdf</a>, <a href="https://arxiv.org/format/2103.02023">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/CVPR46437.2021.01330">10.1109/CVPR46437.2021.01330 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> EnD: Entangling and Disentangling deep representations for bias correction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.02023v1-abstract-short" style="display: inline;"> Artificial neural networks perform state-of-the-art in an ever-growing number of tasks, and nowadays they are used to solve an incredibly large variety of tasks. There are problems, like the presence of biases in the training data, which question the generalization capability of these models. In this work we propose EnD, a regularization strategy whose aim is to prevent deep models from learning u… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.02023v1-abstract-full').style.display = 'inline'; document.getElementById('2103.02023v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.02023v1-abstract-full" style="display: none;"> Artificial neural networks perform state-of-the-art in an ever-growing number of tasks, and nowadays they are used to solve an incredibly large variety of tasks. There are problems, like the presence of biases in the training data, which question the generalization capability of these models. In this work we propose EnD, a regularization strategy whose aim is to prevent deep models from learning unwanted biases. In particular, we insert an "information bottleneck" at a certain point of the deep neural network, where we disentangle the information about the bias, still letting the useful information for the training task forward-propagating in the rest of the model. One big advantage of EnD is that we do not require additional training complexity (like decoders or extra layers in the model), since it is a regularizer directly applied on the trained model. Our experiments show that EnD effectively improves the generalization on unbiased test sets, and it can be effectively applied on real-case scenarios, like removing hidden biases in the COVID-19 detection from radiographic images. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.02023v1-abstract-full').style.display = 'none'; document.getElementById('2103.02023v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.05498">arXiv:2102.05498</a> <span> [<a href="https://arxiv.org/pdf/2102.05498">pdf</a>, <a href="https://arxiv.org/format/2102.05498">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-981-16-3880-0_34">10.1007/978-981-16-3880-0_34 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Dysplasia grading of colorectal polyps through CNN analysis of WSI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Perlo%2C+D">Daniele Perlo</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Bertero%2C+L">Luca Bertero</a>, <a href="/search/cs?searchtype=author&query=Cassoni%2C+P">Paola Cassoni</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.05498v1-abstract-short" style="display: inline;"> Colorectal cancer is a leading cause of cancer death for both men and women. For this reason, histopathological characterization of colorectal polyps is the major instrument for the pathologist in order to infer the actual risk for cancer and to guide further follow-up. Colorectal polyps diagnosis includes the evaluation of the polyp type, and more importantly, the grade of dysplasia. This latter… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05498v1-abstract-full').style.display = 'inline'; document.getElementById('2102.05498v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.05498v1-abstract-full" style="display: none;"> Colorectal cancer is a leading cause of cancer death for both men and women. For this reason, histopathological characterization of colorectal polyps is the major instrument for the pathologist in order to infer the actual risk for cancer and to guide further follow-up. Colorectal polyps diagnosis includes the evaluation of the polyp type, and more importantly, the grade of dysplasia. This latter evaluation represents a critical step for the clinical follow-up. The proposed deep learning-based classification pipeline is based on state-of-the-art convolutional neural network, trained using proper countermeasures to tackle WSI high resolution and very imbalanced dataset. The experimental results show that one can successfully classify adenomas dysplasia grade with 70% accuracy, which is in line with the pathologists' concordance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05498v1-abstract-full').style.display = 'none'; document.getElementById('2102.05498v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.03773">arXiv:2102.03773</a> <span> [<a href="https://arxiv.org/pdf/2102.03773">pdf</a>, <a href="https://arxiv.org/format/2102.03773">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TNNLS.2021.3084527">10.1109/TNNLS.2021.3084527 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> SeReNe: Sensitivity based Regularization of Neurons for Structured Sparsity in Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Bragagnolo%2C+A">Andrea Bragagnolo</a>, <a href="/search/cs?searchtype=author&query=Odierna%2C+F">Francesco Odierna</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.03773v1-abstract-short" style="display: inline;"> Deep neural networks include millions of learnable parameters, making their deployment over resource-constrained devices problematic. SeReNe (Sensitivity-based Regularization of Neurons) is a method for learning sparse topologies with a structure, exploiting neural sensitivity as a regularizer. We define the sensitivity of a neuron as the variation of the network output with respect to the variati… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.03773v1-abstract-full').style.display = 'inline'; document.getElementById('2102.03773v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.03773v1-abstract-full" style="display: none;"> Deep neural networks include millions of learnable parameters, making their deployment over resource-constrained devices problematic. SeReNe (Sensitivity-based Regularization of Neurons) is a method for learning sparse topologies with a structure, exploiting neural sensitivity as a regularizer. We define the sensitivity of a neuron as the variation of the network output with respect to the variation of the activity of the neuron. The lower the sensitivity of a neuron, the less the network output is perturbed if the neuron output changes. By including the neuron sensitivity in the cost function as a regularization term, we areable to prune neurons with low sensitivity. As entire neurons are pruned rather then single parameters, practical network footprint reduction becomes possible. Our experimental results on multiple network architectures and datasets yield competitive compression ratios with respect to state-of-the-art references. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.03773v1-abstract-full').style.display = 'none'; document.getElementById('2102.03773v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.10223">arXiv:2101.10223</a> <span> [<a href="https://arxiv.org/pdf/2101.10223">pdf</a>, <a href="https://arxiv.org/format/2101.10223">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-031-06427-2_15">10.1007/978-3-031-06427-2_15 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A two-step explainable approach for COVID-19 computer-aided diagnosis from chest x-ray images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Berzovini%2C+C">Claudio Berzovini</a>, <a href="/search/cs?searchtype=author&query=Calandri%2C+M">Marco Calandri</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.10223v1-abstract-short" style="display: inline;"> Early screening of patients is a critical issue in order to assess immediate and fast responses against the spread of COVID-19. The use of nasopharyngeal swabs has been considered the most viable approach; however, the result is not immediate or, in the case of fast exams, sufficiently accurate. Using Chest X-Ray (CXR) imaging for early screening potentially provides faster and more accurate respo… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.10223v1-abstract-full').style.display = 'inline'; document.getElementById('2101.10223v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.10223v1-abstract-full" style="display: none;"> Early screening of patients is a critical issue in order to assess immediate and fast responses against the spread of COVID-19. The use of nasopharyngeal swabs has been considered the most viable approach; however, the result is not immediate or, in the case of fast exams, sufficiently accurate. Using Chest X-Ray (CXR) imaging for early screening potentially provides faster and more accurate response; however, diagnosing COVID from CXRs is hard and we should rely on deep learning support, whose decision process is, on the other hand, "black-boxed" and, for such reason, untrustworthy. We propose an explainable two-step diagnostic approach, where we first detect known pathologies (anomalies) in the lungs, on top of which we diagnose the illness. Our approach achieves promising performance in COVID detection, compatible with expert human radiologists. All of our experiments have been carried out bearing in mind that, especially for clinical applications, explainability plays a major role for building trust in machine learning algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.10223v1-abstract-full').style.display = 'none'; document.getElementById('2101.10223v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 4 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.0; I.2.6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.09991">arXiv:2101.09991</a> <span> [<a href="https://arxiv.org/pdf/2101.09991">pdf</a>, <a href="https://arxiv.org/format/2101.09991">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ICIP42928.2021.9506198">10.1109/ICIP42928.2021.9506198 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> UniToPatho, a labeled histopathological dataset for colorectal polyps classification and adenoma dysplasia grading </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Perlo%2C+D">Daniele Perlo</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Bertero%2C+L">Luca Bertero</a>, <a href="/search/cs?searchtype=author&query=Cassoni%2C+P">Paola Cassoni</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.09991v2-abstract-short" style="display: inline;"> Histopathological characterization of colorectal polyps allows to tailor patients' management and follow up with the ultimate aim of avoiding or promptly detecting an invasive carcinoma. Colorectal polyps characterization relies on the histological analysis of tissue samples to determine the polyps malignancy and dysplasia grade. Deep neural networks achieve outstanding accuracy in medical pattern… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.09991v2-abstract-full').style.display = 'inline'; document.getElementById('2101.09991v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.09991v2-abstract-full" style="display: none;"> Histopathological characterization of colorectal polyps allows to tailor patients' management and follow up with the ultimate aim of avoiding or promptly detecting an invasive carcinoma. Colorectal polyps characterization relies on the histological analysis of tissue samples to determine the polyps malignancy and dysplasia grade. Deep neural networks achieve outstanding accuracy in medical patterns recognition, however they require large sets of annotated training images. We introduce UniToPatho, an annotated dataset of 9536 hematoxylin and eosin (H&E) stained patches extracted from 292 whole-slide images, meant for training deep neural networks for colorectal polyps classification and adenomas grading. We present our dataset and provide insights on how to tackle the problem of automatic colorectal polyps characterization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.09991v2-abstract-full').style.display = 'none'; document.getElementById('2101.09991v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 3 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.0; I.2.6 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2101.05992">arXiv:2101.05992</a> <span> [<a href="https://arxiv.org/pdf/2101.05992">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Neural Network-derived perfusion maps: a Model-free approach to computed tomography perfusion in patients with acute ischemic stroke </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gava%2C+U+A">Umberto A. Gava</a>, <a href="/search/cs?searchtype=author&query=D%27Agata%2C+F">Federico D'Agata</a>, <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Bertolino%2C+F">Francesca Bertolino</a>, <a href="/search/cs?searchtype=author&query=Santonocito%2C+A">Ambra Santonocito</a>, <a href="/search/cs?searchtype=author&query=Bennink%2C+E">Edwin Bennink</a>, <a href="/search/cs?searchtype=author&query=Bergui%2C+M">Mauro Bergui</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2101.05992v1-abstract-short" style="display: inline;"> Purpose: In this study we investigate whether a Convolutional Neural Network (CNN) can generate clinically relevant parametric maps from CT perfusion data in a clinical setting of patients with acute ischemic stroke. Methods: Training of the CNN was done on a subset of 100 perfusion data, while 15 samples were used as validation. All the data used for the training/validation of the network and to… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.05992v1-abstract-full').style.display = 'inline'; document.getElementById('2101.05992v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2101.05992v1-abstract-full" style="display: none;"> Purpose: In this study we investigate whether a Convolutional Neural Network (CNN) can generate clinically relevant parametric maps from CT perfusion data in a clinical setting of patients with acute ischemic stroke. Methods: Training of the CNN was done on a subset of 100 perfusion data, while 15 samples were used as validation. All the data used for the training/validation of the network and to generate ground truth (GT) maps, using a state-of-the-art deconvolution-algorithm, were previously pre-processed using a standard pipeline. Validation was carried out through manual segmentation of infarct core and penumbra on both CNN-derived maps and GT maps. Concordance among segmented lesions was assessed using the Dice and the Pearson correlation coefficients across lesion volumes. Results: Mean Dice scores from two different raters and the GT maps were > 0.70 (good-matching). Inter-rater concordance was also high and strong correlation was found between lesion volumes of CNN maps and GT maps (0.99, 0.98). Conclusion: Our CNN-based approach generated clinically relevant perfusion maps that are comparable to state-of-the-art perfusion analysis methods based on deconvolution of the data. Moreover, the proposed technique requires less information to estimate the ischemic core and thus might allow the development of novel perfusion protocols with lower radiation dose. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2101.05992v1-abstract-full').style.display = 'none'; document.getElementById('2101.05992v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 January, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.09905">arXiv:2011.09905</a> <span> [<a href="https://arxiv.org/pdf/2011.09905">pdf</a>, <a href="https://arxiv.org/format/2011.09905">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.neunet.2021.11.029">10.1016/j.neunet.2021.11.029 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> LOss-Based SensiTivity rEgulaRization: towards deep sparse neural networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Bragagnolo%2C+A">Andrea Bragagnolo</a>, <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.09905v1-abstract-short" style="display: inline;"> LOBSTER (LOss-Based SensiTivity rEgulaRization) is a method for training neural networks having a sparse topology. Let the sensitivity of a network parameter be the variation of the loss function with respect to the variation of the parameter. Parameters with low sensitivity, i.e. having little impact on the loss when perturbed, are shrunk and then pruned to sparsify the network. Our method allows… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.09905v1-abstract-full').style.display = 'inline'; document.getElementById('2011.09905v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.09905v1-abstract-full" style="display: none;"> LOBSTER (LOss-Based SensiTivity rEgulaRization) is a method for training neural networks having a sparse topology. Let the sensitivity of a network parameter be the variation of the loss function with respect to the variation of the parameter. Parameters with low sensitivity, i.e. having little impact on the loss when perturbed, are shrunk and then pruned to sparsify the network. Our method allows to train a network from scratch, i.e. without preliminary learning or rewinding. Experiments on multiple architectures and datasets show competitive compression ratios with minimal computational overhead. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.09905v1-abstract-full').style.display = 'none'; document.getElementById('2011.09905v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.01430">arXiv:2008.01430</a> <span> [<a href="https://arxiv.org/pdf/2008.01430">pdf</a>, <a href="https://arxiv.org/format/2008.01430">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TrustCom50675.2020.00126">10.1109/TrustCom50675.2020.00126 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A non-discriminatory approach to ethical deep learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.01430v1-abstract-short" style="display: inline;"> Artificial neural networks perform state-of-the-art in an ever-growing number of tasks, nowadays they are used to solve an incredibly large variety of tasks. However, typical training strategies do not take into account lawful, ethical and discriminatory potential issues the trained ANN models could incur in. In this work we propose NDR, a non-discriminatory regularization strategy to prevent the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.01430v1-abstract-full').style.display = 'inline'; document.getElementById('2008.01430v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.01430v1-abstract-full" style="display: none;"> Artificial neural networks perform state-of-the-art in an ever-growing number of tasks, nowadays they are used to solve an incredibly large variety of tasks. However, typical training strategies do not take into account lawful, ethical and discriminatory potential issues the trained ANN models could incur in. In this work we propose NDR, a non-discriminatory regularization strategy to prevent the ANN model to solve the target task using some discriminatory features like, for example, the ethnicity in an image classification task for human faces. In particular, a part of the ANN model is trained to hide the discriminatory information such that the rest of the network focuses in learning the given learning task. Our experiments show that NDR can be exploited to achieve non-discriminatory models with both minimal computational overhead and performance loss. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.01430v1-abstract-full').style.display = 'none'; document.getElementById('2008.01430v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.14765">arXiv:2004.14765</a> <span> [<a href="https://arxiv.org/pdf/2004.14765">pdf</a>, <a href="https://arxiv.org/ps/2004.14765">ps</a>, <a href="https://arxiv.org/format/2004.14765">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Disordered Systems and Neural Networks">cond-mat.dis-nn</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-61616-8_6">10.1007/978-3-030-61616-8_6 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Pruning artificial neural networks: a way to find well-generalizing, high-entropy sharp minima </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Bragagnolo%2C+A">Andrea Bragagnolo</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.14765v1-abstract-short" style="display: inline;"> Recently, a race towards the simplification of deep networks has begun, showing that it is effectively possible to reduce the size of these models with minimal or no performance loss. However, there is a general lack in understanding why these pruning strategies are effective. In this work, we are going to compare and analyze pruned solutions with two different pruning approaches, one-shot and gra… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.14765v1-abstract-full').style.display = 'inline'; document.getElementById('2004.14765v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.14765v1-abstract-full" style="display: none;"> Recently, a race towards the simplification of deep networks has begun, showing that it is effectively possible to reduce the size of these models with minimal or no performance loss. However, there is a general lack in understanding why these pruning strategies are effective. In this work, we are going to compare and analyze pruned solutions with two different pruning approaches, one-shot and gradual, showing the higher effectiveness of the latter. In particular, we find that gradual pruning allows access to narrow, well-generalizing minima, which are typically ignored when using one-shot approaches. In this work we also propose PSP-entropy, a measure to understand how a given neuron correlates to some specific learned classes. Interestingly, we observe that the features extracted by iteratively-pruned models are less correlated to specific classes, potentially making these models a better fit in transfer learning approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.14765v1-abstract-full').style.display = 'none'; document.getElementById('2004.14765v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.05405">arXiv:2004.05405</a> <span> [<a href="https://arxiv.org/pdf/2004.05405">pdf</a>, <a href="https://arxiv.org/format/2004.05405">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3390/ijerph17186933">10.3390/ijerph17186933 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Unveiling COVID-19 from Chest X-ray with deep learning: a hurdles race with small data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Barbano%2C+C+A">Carlo Alberto Barbano</a>, <a href="/search/cs?searchtype=author&query=Berzovini%2C+C">Claudio Berzovini</a>, <a href="/search/cs?searchtype=author&query=Calandri%2C+M">Marco Calandri</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.05405v1-abstract-short" style="display: inline;"> The possibility to use widespread and simple chest X-ray (CXR) imaging for early screening of COVID-19 patients is attracting much interest from both the clinical and the AI community. In this study we provide insights and also raise warnings on what is reasonable to expect by applying deep-learning to COVID classification of CXR images. We provide a methodological guide and critical reading of an… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.05405v1-abstract-full').style.display = 'inline'; document.getElementById('2004.05405v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.05405v1-abstract-full" style="display: none;"> The possibility to use widespread and simple chest X-ray (CXR) imaging for early screening of COVID-19 patients is attracting much interest from both the clinical and the AI community. In this study we provide insights and also raise warnings on what is reasonable to expect by applying deep-learning to COVID classification of CXR images. We provide a methodological guide and critical reading of an extensive set of statistical results that can be obtained using currently available datasets. In particular, we take the challenge posed by current small size COVID data and show how significant can be the bias introduced by transfer-learning using larger public non-COVID CXR datasets. We also contribute by providing results on a medium size COVID CXR dataset, just collected by one of the major emergency hospitals in Northern Italy during the peak of the COVID pandemic. These novel data allow us to contribute to validate the generalization capacity of preliminary results circulating in the scientific community. Our conclusions shed some light into the possibility to effectively discriminate COVID using CXR. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.05405v1-abstract-full').style.display = 'none'; document.getElementById('2004.05405v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Int. J. Environ. Res. Public Health 2020, 17(18), 6933 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.08544">arXiv:1907.08544</a> <span> [<a href="https://arxiv.org/pdf/1907.08544">pdf</a>, <a href="https://arxiv.org/format/1907.08544">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-30484-3_16">10.1007/978-3-030-30484-3_16 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Post-synaptic potential regularization has potential </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tartaglione%2C+E">Enzo Tartaglione</a>, <a href="/search/cs?searchtype=author&query=Perlo%2C+D">Daniele Perlo</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.08544v1-abstract-short" style="display: inline;"> Improving generalization is one of the main challenges for training deep neural networks on classification tasks. In particular, a number of techniques have been proposed, aiming to boost the performance on unseen data: from standard data augmentation techniques to the $\ell_2$ regularization, dropout, batch normalization, entropy-driven SGD and many more.\\ In this work we propose an elegant, sim… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08544v1-abstract-full').style.display = 'inline'; document.getElementById('1907.08544v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.08544v1-abstract-full" style="display: none;"> Improving generalization is one of the main challenges for training deep neural networks on classification tasks. In particular, a number of techniques have been proposed, aiming to boost the performance on unseen data: from standard data augmentation techniques to the $\ell_2$ regularization, dropout, batch normalization, entropy-driven SGD and many more.\\ In this work we propose an elegant, simple and principled approach: post-synaptic potential regularization (PSP). We tested this regularization on a number of different state-of-the-art scenarios. Empirical results show that PSP achieves a classification error comparable to more sophisticated learning strategies in the MNIST scenario, while improves the generalization compared to $\ell_2$ regularization in deep architectures trained on CIFAR-10. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08544v1-abstract-full').style.display = 'none'; document.getElementById('1907.08544v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1802.09843">arXiv:1802.09843</a> <span> [<a href="https://arxiv.org/pdf/1802.09843">pdf</a>, <a href="https://arxiv.org/format/1802.09843">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/s00138-020-01059-4">10.1007/s00138-020-01059-4 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Graph Laplacian for Image Anomaly Detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Verdoja%2C+F">Francesco Verdoja</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1802.09843v6-abstract-short" style="display: inline;"> Reed-Xiaoli detector (RXD) is recognized as the benchmark algorithm for image anomaly detection; however, it presents known limitations, namely the dependence over the image following a multivariate Gaussian model, the estimation and inversion of a high-dimensional covariance matrix, and the inability to effectively include spatial awareness in its evaluation. In this work, a novel graph-based sol… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.09843v6-abstract-full').style.display = 'inline'; document.getElementById('1802.09843v6-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1802.09843v6-abstract-full" style="display: none;"> Reed-Xiaoli detector (RXD) is recognized as the benchmark algorithm for image anomaly detection; however, it presents known limitations, namely the dependence over the image following a multivariate Gaussian model, the estimation and inversion of a high-dimensional covariance matrix, and the inability to effectively include spatial awareness in its evaluation. In this work, a novel graph-based solution to the image anomaly detection problem is proposed; leveraging the graph Fourier transform, we are able to overcome some of RXD's limitations while reducing computational cost at the same time. Tests over both hyperspectral and medical images, using both synthetic and real anomalies, prove the proposed technique is able to obtain significant gains over performance by other algorithms in the state of the art. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.09843v6-abstract-full').style.display = 'none'; document.getElementById('1802.09843v6-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 February, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in Machine Vision and Applications (Springer)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine Vision and Applications, vol. 31, no. 1, Feb. 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1707.07546">arXiv:1707.07546</a> <span> [<a href="https://arxiv.org/pdf/1707.07546">pdf</a>, <a href="https://arxiv.org/ps/1707.07546">ps</a>, <a href="https://arxiv.org/format/1707.07546">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMM.2015.2402516">10.1109/TMM.2015.2402516 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Simple Countermeasures to Mitigate the Effect of Pollution Attack in Network Coding Based Peer-to-Peer Live Streaming </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Gaeta%2C+R">Rossano Gaeta</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1707.07546v1-abstract-short" style="display: inline;"> Network coding based peer-to-peer streaming represents an effective solution to aggregate user capacities and to increase system throughput in live multimedia streaming. Nonetheless, such systems are vulnerable to pollution attacks where a handful of malicious peers can disrupt the communication by transmitting just a few bogus packets which are then recombined and relayed by unaware honest nodes,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.07546v1-abstract-full').style.display = 'inline'; document.getElementById('1707.07546v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1707.07546v1-abstract-full" style="display: none;"> Network coding based peer-to-peer streaming represents an effective solution to aggregate user capacities and to increase system throughput in live multimedia streaming. Nonetheless, such systems are vulnerable to pollution attacks where a handful of malicious peers can disrupt the communication by transmitting just a few bogus packets which are then recombined and relayed by unaware honest nodes, further spreading the pollution over the network. Whereas previous research focused on malicious nodes identification schemes and pollution-resilient coding, in this paper we show pollution countermeasures which make a standard network coding scheme resilient to pollution attacks. Thanks to a simple yet effective analytical model of a reference node collecting packets by malicious and honest neighbors, we demonstrate that i) packets received earlier are less likely to be polluted and ii) short generations increase the likelihood to recover a clean generation. Therefore, we propose a recombination scheme where nodes draw packets to be recombined according to their age in the input queue, paired with a decoding scheme able to detect the reception of polluted packets early in the decoding process and short generations. The effectiveness of our approach is experimentally evaluated in a real system we developed and deployed on hundreds to thousands peers. Experimental evidence shows that, thanks to our simple countermeasures, the effect of a pollution attack is almost canceled and the video quality experienced by the peers is comparable to pre-attack levels. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1707.07546v1-abstract-full').style.display = 'none'; document.getElementById('1707.07546v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Transactions on Multimedia, Volume 17, Issue 4, April 2015, Pages 562 - 573 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1309.0316">arXiv:1309.0316</a> <span> [<a href="https://arxiv.org/pdf/1309.0316">pdf</a>, <a href="https://arxiv.org/ps/1309.0316">ps</a>, <a href="https://arxiv.org/format/1309.0316">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TMM.2013.2285518">10.1109/TMM.2013.2285518 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Band Codes for Energy-Efficient Network Coding with Application to P2P Mobile Streaming </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Fiandrotti%2C+A">Attilio Fiandrotti</a>, <a href="/search/cs?searchtype=author&query=Bioglio%2C+V">Valerio Bioglio</a>, <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">Marco Grangetto</a>, <a href="/search/cs?searchtype=author&query=Gaeta%2C+R">Rossano Gaeta</a>, <a href="/search/cs?searchtype=author&query=Magli%2C+E">Enrico Magli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1309.0316v1-abstract-short" style="display: inline;"> A key problem in random network coding (NC) lies in the complexity and energy consumption associated with the packet decoding processes, which hinder its application in mobile environments. Controlling and hence limiting such factors has always been an important but elusive research goal, since the packet degree distribution, which is the main factor driving the complexity, is altered in a non-det… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1309.0316v1-abstract-full').style.display = 'inline'; document.getElementById('1309.0316v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1309.0316v1-abstract-full" style="display: none;"> A key problem in random network coding (NC) lies in the complexity and energy consumption associated with the packet decoding processes, which hinder its application in mobile environments. Controlling and hence limiting such factors has always been an important but elusive research goal, since the packet degree distribution, which is the main factor driving the complexity, is altered in a non-deterministic way by the random recombinations at the network nodes. In this paper we tackle this problem proposing Band Codes (BC), a novel class of network codes specifically designed to preserve the packet degree distribution during packet encoding, ecombination and decoding. BC are random codes over GF(2) that exhibit low decoding complexity, feature limited and controlled degree distribution by construction, and hence allow to effectively apply NC even in energy-constrained scenarios. In particular, in this paper we motivate and describe our new design and provide a thorough analysis of its performance. We provide numerical simulations of the performance of BC in order to validate the analysis and assess the overhead of BC with respect to a onventional NC scheme. Moreover, peer-to-peer media streaming experiments with a random-push protocol show that BC reduce the decoding complexity by a factor of two, to a point where NC-based mobile streaming to mobile devices becomes practically feasible. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1309.0316v1-abstract-full').style.display = 'none'; document.getElementById('1309.0316v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 September, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2013. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To be published in IEEE Transacions on Multimedia</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> H.5.1 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/0712.0271">arXiv:0712.0271</a> <span> [<a href="https://arxiv.org/pdf/0712.0271">pdf</a>, <a href="https://arxiv.org/ps/0712.0271">ps</a>, <a href="https://arxiv.org/format/0712.0271">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Distributed Arithmetic Coding for the Asymmetric Slepian-Wolf problem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Grangetto%2C+M">M. Grangetto</a>, <a href="/search/cs?searchtype=author&query=Magli%2C+E">E. Magli</a>, <a href="/search/cs?searchtype=author&query=Olmo%2C+G">G. Olmo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="0712.0271v2-abstract-short" style="display: inline;"> Distributed source coding schemes are typically based on the use of channels codes as source codes. In this paper we propose a new paradigm, termed "distributed arithmetic coding", which exploits the fact that arithmetic codes are good source as well as channel codes. In particular, we propose a distributed binary arithmetic coder for Slepian-Wolf coding with decoder side information, along with… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0712.0271v2-abstract-full').style.display = 'inline'; document.getElementById('0712.0271v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="0712.0271v2-abstract-full" style="display: none;"> Distributed source coding schemes are typically based on the use of channels codes as source codes. In this paper we propose a new paradigm, termed "distributed arithmetic coding", which exploits the fact that arithmetic codes are good source as well as channel codes. In particular, we propose a distributed binary arithmetic coder for Slepian-Wolf coding with decoder side information, along with a soft joint decoder. The proposed scheme provides several advantages over existing Slepian-Wolf coders, especially its good performance at small block lengths, and the ability to incorporate arbitrary source models in the encoding process, e.g. context-based statistical models. We have compared the performance of distributed arithmetic coding with turbo codes and low-density parity-check codes, and found that the proposed approach has very competitive performance. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0712.0271v2-abstract-full').style.display = 'none'; document.getElementById('0712.0271v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2008; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 December, 2007; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2007. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to IEEE Transactions on Signal processing, Nov. 2007. Revised version accepted with minor revisions</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>