CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–21 of 21 results for author: <span class="mathjax">Gomez, C</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Gomez%2C+C">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Gomez, C"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Gomez%2C+C&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Gomez, C"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.17424">arXiv:2411.17424</a> <span> [<a href="https://arxiv.org/pdf/2411.17424">pdf</a>, <a href="https://arxiv.org/format/2411.17424">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> A Primer on AP Power Save in Wi-Fi 8: Overview, Analysis, and Open Challenges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sanchez-Vital%2C+R">Roger Sanchez-Vital</a>, <a href="/search/cs?searchtype=author&query=Belogaev%2C+A">Andrey Belogaev</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Carles Gomez</a>, <a href="/search/cs?searchtype=author&query=Famaey%2C+J">Jeroen Famaey</a>, <a href="/search/cs?searchtype=author&query=Garcia-Villegas%2C+E">Eduard Garcia-Villegas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.17424v1-abstract-short" style="display: inline;"> Wi-Fi facilitates the Internet connectivity of billions of devices worldwide, making it an indispensable technology for modern life. Wi-Fi networks are becoming significantly denser, making energy consumption and its effects on operational costs and environmental sustainability crucial considerations. Wi-Fi has already introduced several mechanisms to enhance the energy efficiency of non-Access Po… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17424v1-abstract-full').style.display = 'inline'; document.getElementById('2411.17424v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.17424v1-abstract-full" style="display: none;"> Wi-Fi facilitates the Internet connectivity of billions of devices worldwide, making it an indispensable technology for modern life. Wi-Fi networks are becoming significantly denser, making energy consumption and its effects on operational costs and environmental sustainability crucial considerations. Wi-Fi has already introduced several mechanisms to enhance the energy efficiency of non-Access Point (non-AP) stations (STAs). However, the reduction of energy consumption of APs has never been a priority. Always-on APs operating at their highest capabilities consume significant power, which affects the energy costs of the infrastructure owner, aggravates the environmental impact, and decreases the lifetime of battery-powered APs. IEEE 802.11bn, which will be the basis of Wi-Fi 8, makes a big leap forward by introducing the AP Power Save (PS) framework. In this article, we describe and analyze the main proposals discussed in the IEEE 802.11bn Task Group (TGbn), such as Scheduled Power Save, (Semi-)Dynamic Power Save, and Cross Link Power Save. We also consider other proposals that are being discussed in TGbn, namely the integration of Wake-up Radios (WuR) and STA offloading. We then showcase the potential benefits of AP PS using a public dataset collected from 470 real APs deployed in a university campus. Our numerical analysis reveals that AP power consumption could be decreased on average by at least 28%. Finally, we outline the open challenges that need to be addressed to optimally integrate AP PS in Wi-Fi and ensure its compatibility with legacy devices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.17424v1-abstract-full').style.display = 'none'; document.getElementById('2411.17424v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.10096">arXiv:2410.10096</a> <span> [<a href="https://arxiv.org/pdf/2410.10096">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Innovative Deep Learning Techniques for Obstacle Recognition: A Comparative Study of Modern Detection Algorithms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=P%C3%A9rez%2C+S">Santiago P茅rez</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Camila G贸mez</a>, <a href="/search/cs?searchtype=author&query=Rodr%C3%ADguez%2C+M">Mat铆as Rodr铆guez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.10096v1-abstract-short" style="display: inline;"> This study explores a comprehensive approach to obstacle detection using advanced YOLO models, specifically YOLOv8, YOLOv7, YOLOv6, and YOLOv5. Leveraging deep learning techniques, the research focuses on the performance comparison of these models in real-time detection scenarios. The findings demonstrate that YOLOv8 achieves the highest accuracy with improved precision-recall metrics. Detailed tr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10096v1-abstract-full').style.display = 'inline'; document.getElementById('2410.10096v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.10096v1-abstract-full" style="display: none;"> This study explores a comprehensive approach to obstacle detection using advanced YOLO models, specifically YOLOv8, YOLOv7, YOLOv6, and YOLOv5. Leveraging deep learning techniques, the research focuses on the performance comparison of these models in real-time detection scenarios. The findings demonstrate that YOLOv8 achieves the highest accuracy with improved precision-recall metrics. Detailed training processes, algorithmic principles, and a range of experimental results are presented to validate the model's effectiveness. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.10096v1-abstract-full').style.display = 'none'; document.getElementById('2410.10096v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.04908">arXiv:2408.04908</a> <span> [<a href="https://arxiv.org/pdf/2408.04908">pdf</a>, <a href="https://arxiv.org/format/2408.04908">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3390/s24175770">10.3390/s24175770 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Energy performance of LR-FHSS: analysis and evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sanchez-Vital%2C+R">Roger Sanchez-Vital</a>, <a href="/search/cs?searchtype=author&query=Casals%2C+L">Llu铆s Casals</a>, <a href="/search/cs?searchtype=author&query=Heer-Salva%2C+B">Bartomeu Heer-Salva</a>, <a href="/search/cs?searchtype=author&query=Vidal%2C+R">Rafael Vidal</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Carles Gomez</a>, <a href="/search/cs?searchtype=author&query=Garcia-Villegas%2C+E">Eduard Garcia-Villegas</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.04908v2-abstract-short" style="display: inline;"> Long-range frequency hopping spread spectrum (LR-FHSS) is a pivotal advancement in the LoRaWAN protocol that is designed to enhance the network's capacity and robustness, particularly in densely populated environments. Although energy consumption is paramount in LoRaWAN-based end devices, {this is the first study} in the literature, to our knowledge, that models the impact of this novel mechanism… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.04908v2-abstract-full').style.display = 'inline'; document.getElementById('2408.04908v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.04908v2-abstract-full" style="display: none;"> Long-range frequency hopping spread spectrum (LR-FHSS) is a pivotal advancement in the LoRaWAN protocol that is designed to enhance the network's capacity and robustness, particularly in densely populated environments. Although energy consumption is paramount in LoRaWAN-based end devices, {this is the first study} in the literature, to our knowledge, that models the impact of this novel mechanism on energy consumption. In this article, we provide a comprehensive energy consumption analytical model of LR-FHSS, focusing on three critical metrics: average current consumption, battery lifetime, and energy efficiency of data transmission. The model is based on measurements performed on real hardware in a fully operational LR-FHSS network. While in our evaluation, LR-FHSS can show worse consumption figures than LoRa, we find that with optimal configuration, the battery lifetime of LR-FHSS end devices can reach 2.5 years for a 50 min notification period. For the most energy-efficient payload size, this lifespan can be extended to a theoretical maximum of up to 16 years with a one-day notification interval using a cell-coin battery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.04908v2-abstract-full').style.display = 'none'; document.getElementById('2408.04908v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Sensors 2024, 24, 5770. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.11974">arXiv:2407.11974</a> <span> [<a href="https://arxiv.org/pdf/2407.11974">pdf</a>, <a href="https://arxiv.org/format/2407.11974">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Explainable AI Enhances Glaucoma Referrals, Yet the Human-AI Team Still Falls Short of the AI Alone </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+R">Ruolin Wang</a>, <a href="/search/cs?searchtype=author&query=Breininger%2C+K">Katharina Breininger</a>, <a href="/search/cs?searchtype=author&query=Casey%2C+C">Corinne Casey</a>, <a href="/search/cs?searchtype=author&query=Bradley%2C+C">Chris Bradley</a>, <a href="/search/cs?searchtype=author&query=Pavlak%2C+M">Mitchell Pavlak</a>, <a href="/search/cs?searchtype=author&query=Pham%2C+A">Alex Pham</a>, <a href="/search/cs?searchtype=author&query=Yohannan%2C+J">Jithin Yohannan</a>, <a href="/search/cs?searchtype=author&query=Unberath%2C+M">Mathias Unberath</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.11974v1-abstract-short" style="display: inline;"> Primary care providers are vital for initial triage and referrals to specialty care. In glaucoma, asymptomatic and fast progression can lead to vision loss, necessitating timely referrals to specialists. However, primary eye care providers may not identify urgent cases, potentially delaying care. Artificial Intelligence (AI) offering explanations could enhance their referral decisions. We investig… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.11974v1-abstract-full').style.display = 'inline'; document.getElementById('2407.11974v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.11974v1-abstract-full" style="display: none;"> Primary care providers are vital for initial triage and referrals to specialty care. In glaucoma, asymptomatic and fast progression can lead to vision loss, necessitating timely referrals to specialists. However, primary eye care providers may not identify urgent cases, potentially delaying care. Artificial Intelligence (AI) offering explanations could enhance their referral decisions. We investigate how various AI explanations help providers distinguish between patients needing immediate or non-urgent specialist referrals. We built explainable AI algorithms to predict glaucoma surgery needs from routine eyecare data as a proxy for identifying high-risk patients. We incorporated intrinsic and post-hoc explainability and conducted an online study with optometrists to assess human-AI team performance, measuring referral accuracy and analyzing interactions with AI, including agreement rates, task time, and user experience perceptions. AI support enhanced referral accuracy among 87 participants (59.9%/50.8% with/without AI), though Human-AI teams underperformed compared to AI alone. Participants believed they included AI advice more when using the intrinsic model, and perceived it more useful and promising. Without explanations, deviations from AI recommendations increased. AI support did not increase workload, confidence, and trust, but reduced challenges. On a separate test set, our black-box and intrinsic models achieved an accuracy of 77% and 71%, respectively, in predicting surgical outcomes. We identify opportunities of human-AI teaming for glaucoma management in primary eye care, noting that while AI enhances referral accuracy, it also shows a performance gap compared to AI alone, even with explanations. Human involvement remains essential in medical decision making, underscoring the need for future research to optimize collaboration, ensuring positive experiences and safe AI use. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.11974v1-abstract-full').style.display = 'none'; document.getElementById('2407.11974v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.01618">arXiv:2405.01618</a> <span> [<a href="https://arxiv.org/pdf/2405.01618">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Matter: IoT Interoperability for Smart Homes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Madadi-Barough%2C+S">Saeid Madadi-Barough</a>, <a href="/search/cs?searchtype=author&query=Ruiz-Blanco%2C+P">Pau Ruiz-Blanco</a>, <a href="/search/cs?searchtype=author&query=Lin%2C+J">Jiadeng Lin</a>, <a href="/search/cs?searchtype=author&query=Vidal%2C+R">Rafael Vidal</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Carles Gomez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.01618v1-abstract-short" style="display: inline;"> The smart home is a major Internet of Things (IoT) application domain with tremendous market expectations. However, communication solutions for smart home devices have exhibited a lack of interoperability, especially, but not only, at the highest layers of the protocol stack. This issue challenges the success of the smart home concept. In order to overcome this problem, crucial industry organizati… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.01618v1-abstract-full').style.display = 'inline'; document.getElementById('2405.01618v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.01618v1-abstract-full" style="display: none;"> The smart home is a major Internet of Things (IoT) application domain with tremendous market expectations. However, communication solutions for smart home devices have exhibited a lack of interoperability, especially, but not only, at the highest layers of the protocol stack. This issue challenges the success of the smart home concept. In order to overcome this problem, crucial industry organizations, including Google, Apple, Amazon and the Connectivity Standards Alliance (formerly, the ZigBee Alliance) have collaborated to produce Matter, a connectivity solution intended to become a universal standard for the smart home. This paper overviews, evaluates and discusses Matter, focusing on its design, features, performance, and potential future directions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.01618v1-abstract-full').style.display = 'none'; document.getElementById('2405.01618v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">7 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.17150">arXiv:2401.17150</a> <span> [<a href="https://arxiv.org/pdf/2401.17150">pdf</a>, <a href="https://arxiv.org/format/2401.17150">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> GAISSALabel: A tool for energy labeling of ML models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Duran%2C+P">Pau Duran</a>, <a href="/search/cs?searchtype=author&query=Casta%C3%B1o%2C+J">Joel Casta帽o</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Cristina G贸mez</a>, <a href="/search/cs?searchtype=author&query=Mart%C3%ADnez-Fern%C3%A1ndez%2C+S">Silverio Mart铆nez-Fern谩ndez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.17150v1-abstract-short" style="display: inline;"> Background: The increasing environmental impact of Information Technologies, particularly in Machine Learning (ML), highlights the need for sustainable practices in software engineering. The escalating complexity and energy consumption of ML models need tools for assessing and improving their energy efficiency. Goal: This paper introduces GAISSALabel, a web-based tool designed to evaluate and labe… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.17150v1-abstract-full').style.display = 'inline'; document.getElementById('2401.17150v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.17150v1-abstract-full" style="display: none;"> Background: The increasing environmental impact of Information Technologies, particularly in Machine Learning (ML), highlights the need for sustainable practices in software engineering. The escalating complexity and energy consumption of ML models need tools for assessing and improving their energy efficiency. Goal: This paper introduces GAISSALabel, a web-based tool designed to evaluate and label the energy efficiency of ML models. Method: GAISSALabel is a technology transfer development from a former research on energy efficiency classification of ML, consisting of a holistic tool for assessing both the training and inference phases of ML models, considering various metrics such as power draw, model size efficiency, CO2e emissions and more. Results: GAISSALabel offers a labeling system for energy efficiency, akin to labels on consumer appliances, making it accessible to ML stakeholders of varying backgrounds. The tool's adaptability allows for customization in the proposed labeling system, ensuring its relevance in the rapidly evolving ML field. Conclusions: GAISSALabel represents a significant step forward in sustainable software engineering, offering a solution for balancing high-performance ML models with environmental impacts. The tool's effectiveness and market relevance will be further assessed through planned evaluations using the Technology Acceptance Model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.17150v1-abstract-full').style.display = 'none'; document.getElementById('2401.17150v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 2 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.19778">arXiv:2310.19778</a> <span> [<a href="https://arxiv.org/pdf/2310.19778">pdf</a>, <a href="https://arxiv.org/format/2310.19778">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Human-AI collaboration is not very collaborative yet: A taxonomy of interaction patterns in AI-assisted decision making from a systematic review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&query=Cho%2C+S+M">Sue Min Cho</a>, <a href="/search/cs?searchtype=author&query=Ke%2C+S">Shichang Ke</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+C">Chien-Ming Huang</a>, <a href="/search/cs?searchtype=author&query=Unberath%2C+M">Mathias Unberath</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.19778v3-abstract-short" style="display: inline;"> Leveraging Artificial Intelligence (AI) in decision support systems has disproportionately focused on technological advancements, often overlooking the alignment between algorithmic outputs and human expectations. A human-centered perspective attempts to alleviate this concern by designing AI solutions for seamless integration with existing processes. Determining what information AI should provide… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.19778v3-abstract-full').style.display = 'inline'; document.getElementById('2310.19778v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.19778v3-abstract-full" style="display: none;"> Leveraging Artificial Intelligence (AI) in decision support systems has disproportionately focused on technological advancements, often overlooking the alignment between algorithmic outputs and human expectations. A human-centered perspective attempts to alleviate this concern by designing AI solutions for seamless integration with existing processes. Determining what information AI should provide to aid humans is vital, a concept underscored by explainable AI's efforts to justify AI predictions. However, how the information is presented, e.g., the sequence of recommendations and solicitation of interpretations, is equally crucial as complex interactions may emerge between humans and AI. While empirical studies have evaluated human-AI dynamics across domains, a common vocabulary for human-AI interaction protocols is lacking. To promote more deliberate consideration of interaction designs, we introduce a taxonomy of interaction patterns that delineate various modes of human-AI interactivity. We summarize the results of a systematic review of AI-assisted decision making literature and identify trends and opportunities in existing interactions across application domains from 105 articles. We find that current interactions are dominated by simplistic collaboration paradigms, leading to little support for truly interactive functionality. Our taxonomy offers a tool to understand interactivity with AI in decision-making and foster interaction designs for achieving clear communication, trustworthiness, and collaboration. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.19778v3-abstract-full').style.display = 'none'; document.getElementById('2310.19778v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 30 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages; 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.14819">arXiv:2308.14819</a> <span> [<a href="https://arxiv.org/pdf/2308.14819">pdf</a>, <a href="https://arxiv.org/ps/2308.14819">ps</a>, <a href="https://arxiv.org/format/2308.14819">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> </div> </div> <p class="title is-5 mathjax"> A polynomial quantum computing algorithm for solving the dualization problem </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mezzini%2C+M">Mauro Mezzini</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+F+C">Fernando Cuartero Gomez</a>, <a href="/search/cs?searchtype=author&query=Pelayo%2C+F">Fernando Pelayo</a>, <a href="/search/cs?searchtype=author&query=Gonzales%2C+J+J+P">Jose Javier Paulet Gonzales</a>, <a href="/search/cs?searchtype=author&query=Calvo%2C+H+I+d+l+C">Hernan Indibil de la Cruz Calvo</a>, <a href="/search/cs?searchtype=author&query=Pascual%2C+V">Vicente Pascual</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.14819v1-abstract-short" style="display: inline;"> Given two prime monotone boolean functions $f:\{0,1\}^n \to \{0,1\}$ and $g:\{0,1\}^n \to \{0,1\}$ the dualization problem consists in determining if $g$ is the dual of $f$, that is if $f(x_1, \dots, x_n)= \overline{g}(\overline{x_1}, \dots \overline{x_n})$ for all $(x_1, \dots x_n) \in \{0,1\}^n$. Associated to the dualization problem there is the corresponding decision problem: given two monoton… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.14819v1-abstract-full').style.display = 'inline'; document.getElementById('2308.14819v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.14819v1-abstract-full" style="display: none;"> Given two prime monotone boolean functions $f:\{0,1\}^n \to \{0,1\}$ and $g:\{0,1\}^n \to \{0,1\}$ the dualization problem consists in determining if $g$ is the dual of $f$, that is if $f(x_1, \dots, x_n)= \overline{g}(\overline{x_1}, \dots \overline{x_n})$ for all $(x_1, \dots x_n) \in \{0,1\}^n$. Associated to the dualization problem there is the corresponding decision problem: given two monotone prime boolean functions $f$ and $g$ is $g$ the dual of $f$? In this paper we present a quantum computing algorithm that solves the decision version of the dualization problem in polynomial time. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.14819v1-abstract-full').style.display = 'none'; document.getElementById('2308.14819v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.04870">arXiv:2205.04870</a> <span> [<a href="https://arxiv.org/pdf/2205.04870">pdf</a>, <a href="https://arxiv.org/format/2205.04870">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Applications">stat.AP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Joint Study of Above Ground Biomass and Soil Organic Carbon for Total Carbon Estimation using Satellite Imagery in Scotland </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chan%2C+T">Terrence Chan</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C+A">Carla Arus Gomez</a>, <a href="/search/cs?searchtype=author&query=Kothikar%2C+A">Anish Kothikar</a>, <a href="/search/cs?searchtype=author&query=Baiz%2C+P">Pedro Baiz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.04870v1-abstract-short" style="display: inline;"> Land Carbon verification has long been a challenge in the carbon credit market. Carbon verification methods currently available are expensive, and may generate low-quality credit. Scalable and accurate remote sensing techniques enable new approaches to monitor changes in Above Ground Biomass (AGB) and Soil Organic Carbon (SOC). The majority of state-of-the-art research employs remote sensing on AG… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.04870v1-abstract-full').style.display = 'inline'; document.getElementById('2205.04870v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.04870v1-abstract-full" style="display: none;"> Land Carbon verification has long been a challenge in the carbon credit market. Carbon verification methods currently available are expensive, and may generate low-quality credit. Scalable and accurate remote sensing techniques enable new approaches to monitor changes in Above Ground Biomass (AGB) and Soil Organic Carbon (SOC). The majority of state-of-the-art research employs remote sensing on AGB and SOC separately, although some studies indicate a positive correlation between the two. We intend to combine the two domains in our research to improve state-of-the-art total carbon estimation and to provide insight into the voluntary carbon trading market. We begin by establishing baseline model in our study area in Scotland, using state-of-the-art methodologies in the SOC and AGB domains. The effects of feature engineering techniques such as variance inflation factor and feature selection on machine learning models are then investigated. This is extended by combining predictor variables from the two domains. Finally, we leverage the possible correlation between AGB and SOC to establish a relationship between the two and propose novel models in an attempt outperform the state-of-the-art results. We compared three machine learning techniques, boosted regression tree, random forest, and xgboost. These techniques have been demonstrated to be the most effective in both domains. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.04870v1-abstract-full').style.display = 'none'; document.getElementById('2205.04870v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.12596">arXiv:2112.12596</a> <span> [<a href="https://arxiv.org/pdf/2112.12596">pdf</a>, <a href="https://arxiv.org/format/2112.12596">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> </div> </div> <p class="title is-5 mathjax"> Explainable Medical Imaging AI Needs Human-Centered Design: Guidelines and Evidence from a Systematic Review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+H">Haomin Chen</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+C">Chien-Ming Huang</a>, <a href="/search/cs?searchtype=author&query=Unberath%2C+M">Mathias Unberath</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.12596v4-abstract-short" style="display: inline;"> Transparency in Machine Learning (ML), attempts to reveal the working mechanisms of complex models. Transparent ML promises to advance human factors engineering goals of human-centered AI in the target users. From a human-centered design perspective, transparency is not a property of the ML model but an affordance, i.e. a relationship between algorithm and user; as a result, iterative prototyping… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.12596v4-abstract-full').style.display = 'inline'; document.getElementById('2112.12596v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.12596v4-abstract-full" style="display: none;"> Transparency in Machine Learning (ML), attempts to reveal the working mechanisms of complex models. Transparent ML promises to advance human factors engineering goals of human-centered AI in the target users. From a human-centered design perspective, transparency is not a property of the ML model but an affordance, i.e. a relationship between algorithm and user; as a result, iterative prototyping and evaluation with users is critical to attaining adequate solutions that afford transparency. However, following human-centered design principles in healthcare and medical image analysis is challenging due to the limited availability of and access to end users. To investigate the state of transparent ML in medical image analysis, we conducted a systematic review of the literature. Our review reveals multiple severe shortcomings in the design and validation of transparent ML for medical image analysis applications. We find that most studies to date approach transparency as a property of the model itself, similar to task performance, without considering end users during neither development nor evaluation. Additionally, the lack of user research, and the sporadic validation of transparency claims put contemporary research on transparent ML for medical image analysis at risk of being incomprehensible to users, and thus, clinically irrelevant. To alleviate these shortcomings in forthcoming research while acknowledging the challenges of human-centered design in healthcare, we introduce the INTRPRT guideline, a systematic design directive for transparent ML systems in medical image analysis. The INTRPRT guideline suggests formative user research as the first step of transparent model design to understand user needs and domain requirements. Following this process produces evidence to support design choices, and ultimately, increases the likelihood that the algorithms afford transparency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.12596v4-abstract-full').style.display = 'none'; document.getElementById('2112.12596v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.10074">arXiv:2112.10074</a> <span> [<a href="https://arxiv.org/pdf/2112.10074">pdf</a>, <a href="https://arxiv.org/format/2112.10074">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.59275/j.melba.2022-354b">10.59275/j.melba.2022-354b <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> QU-BraTS: MICCAI BraTS 2020 Challenge on Quantifying Uncertainty in Brain Tumor Segmentation - Analysis of Ranking Scores and Benchmarking Results </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+R">Raghav Mehta</a>, <a href="/search/cs?searchtype=author&query=Filos%2C+A">Angelos Filos</a>, <a href="/search/cs?searchtype=author&query=Baid%2C+U">Ujjwal Baid</a>, <a href="/search/cs?searchtype=author&query=Sako%2C+C">Chiharu Sako</a>, <a href="/search/cs?searchtype=author&query=McKinley%2C+R">Richard McKinley</a>, <a href="/search/cs?searchtype=author&query=Rebsamen%2C+M">Michael Rebsamen</a>, <a href="/search/cs?searchtype=author&query=Datwyler%2C+K">Katrin Datwyler</a>, <a href="/search/cs?searchtype=author&query=Meier%2C+R">Raphael Meier</a>, <a href="/search/cs?searchtype=author&query=Radojewski%2C+P">Piotr Radojewski</a>, <a href="/search/cs?searchtype=author&query=Murugesan%2C+G+K">Gowtham Krishnan Murugesan</a>, <a href="/search/cs?searchtype=author&query=Nalawade%2C+S">Sahil Nalawade</a>, <a href="/search/cs?searchtype=author&query=Ganesh%2C+C">Chandan Ganesh</a>, <a href="/search/cs?searchtype=author&query=Wagner%2C+B">Ben Wagner</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+F+F">Fang F. Yu</a>, <a href="/search/cs?searchtype=author&query=Fei%2C+B">Baowei Fei</a>, <a href="/search/cs?searchtype=author&query=Madhuranthakam%2C+A+J">Ananth J. Madhuranthakam</a>, <a href="/search/cs?searchtype=author&query=Maldjian%2C+J+A">Joseph A. Maldjian</a>, <a href="/search/cs?searchtype=author&query=Daza%2C+L">Laura Daza</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&query=Arbelaez%2C+P">Pablo Arbelaez</a>, <a href="/search/cs?searchtype=author&query=Dai%2C+C">Chengliang Dai</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+S">Shuo Wang</a>, <a href="/search/cs?searchtype=author&query=Reynaud%2C+H">Hadrien Reynaud</a>, <a href="/search/cs?searchtype=author&query=Mo%2C+Y">Yuan-han Mo</a>, <a href="/search/cs?searchtype=author&query=Angelini%2C+E">Elsa Angelini</a> , et al. (67 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.10074v2-abstract-short" style="display: inline;"> Deep learning (DL) models have provided state-of-the-art performance in various medical imaging benchmarking challenges, including the Brain Tumor Segmentation (BraTS) challenges. However, the task of focal pathology multi-compartment segmentation (e.g., tumor and lesion sub-regions) is particularly challenging, and potential errors hinder translating DL models into clinical workflows. Quantifying… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.10074v2-abstract-full').style.display = 'inline'; document.getElementById('2112.10074v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.10074v2-abstract-full" style="display: none;"> Deep learning (DL) models have provided state-of-the-art performance in various medical imaging benchmarking challenges, including the Brain Tumor Segmentation (BraTS) challenges. However, the task of focal pathology multi-compartment segmentation (e.g., tumor and lesion sub-regions) is particularly challenging, and potential errors hinder translating DL models into clinical workflows. Quantifying the reliability of DL model predictions in the form of uncertainties could enable clinical review of the most uncertain regions, thereby building trust and paving the way toward clinical translation. Several uncertainty estimation methods have recently been introduced for DL medical image segmentation tasks. Developing scores to evaluate and compare the performance of uncertainty measures will assist the end-user in making more informed decisions. In this study, we explore and evaluate a score developed during the BraTS 2019 and BraTS 2020 task on uncertainty quantification (QU-BraTS) and designed to assess and rank uncertainty estimates for brain tumor multi-compartment segmentation. This score (1) rewards uncertainty estimates that produce high confidence in correct assertions and those that assign low confidence levels at incorrect assertions, and (2) penalizes uncertainty measures that lead to a higher percentage of under-confident correct assertions. We further benchmark the segmentation uncertainties generated by 14 independent participating teams of QU-BraTS 2020, all of which also participated in the main BraTS segmentation task. Overall, our findings confirm the importance and complementary value that uncertainty estimates provide to segmentation algorithms, highlighting the need for uncertainty quantification in medical image analyses. Finally, in favor of transparency and reproducibility, our evaluation code is made publicly available at: https://github.com/RagMeh11/QU-BraTS. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.10074v2-abstract-full').style.display = 'none'; document.getElementById('2112.10074v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the Journal of Machine Learning for Biomedical Imaging (MELBA): https://www.melba-journal.org/papers/2022:026.html</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Machine.Learning.for.Biomedical.Imaging. 1 (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.06246">arXiv:2108.06246</a> <span> [<a href="https://arxiv.org/pdf/2108.06246">pdf</a>, <a href="https://arxiv.org/format/2108.06246">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> An Interpretable Algorithm for Uveal Melanoma Subtyping from Whole Slide Cytology Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+H">Haomin Chen</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+T+Y+A">T. Y. Alvin Liu</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C">Catalina Gomez</a>, <a href="/search/cs?searchtype=author&query=Correa%2C+Z">Zelia Correa</a>, <a href="/search/cs?searchtype=author&query=Unberath%2C+M">Mathias Unberath</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.06246v1-abstract-short" style="display: inline;"> Algorithmic decision support is rapidly becoming a staple of personalized medicine, especially for high-stakes recommendations in which access to certain information can drastically alter the course of treatment, and thus, patient outcome; a prominent example is radiomics for cancer subtyping. Because in these scenarios the stakes are high, it is desirable for decision systems to not only provide… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.06246v1-abstract-full').style.display = 'inline'; document.getElementById('2108.06246v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.06246v1-abstract-full" style="display: none;"> Algorithmic decision support is rapidly becoming a staple of personalized medicine, especially for high-stakes recommendations in which access to certain information can drastically alter the course of treatment, and thus, patient outcome; a prominent example is radiomics for cancer subtyping. Because in these scenarios the stakes are high, it is desirable for decision systems to not only provide recommendations but supply transparent reasoning in support thereof. For learning-based systems, this can be achieved through an interpretable design of the inference pipeline. Herein we describe an automated yet interpretable system for uveal melanoma subtyping with digital cytology images from fine needle aspiration biopsies. Our method embeds every automatically segmented cell of a candidate cytology image as a point in a 2D manifold defined by many representative slides, which enables reasoning about the cell-level composition of the tissue sample, paving the way for interpretable subtyping of the biopsy. Finally, a rule-based slide-level classification algorithm is trained on the partitions of the circularly distorted 2D manifold. This process results in a simple rule set that is evaluated automatically but highly transparent for human verification. On our in house cytology dataset of 88 uveal melanoma patients, the proposed method achieves an accuracy of 87.5% that compares favorably to all competing approaches, including deep "black box" models. The method comes with a user interface to facilitate interaction with cell-level content, which may offer additional insights for pathological assessment. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.06246v1-abstract-full').style.display = 'none'; document.getElementById('2108.06246v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ICML 2021 workshop of ILHM</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.05940">arXiv:2012.05940</a> <span> [<a href="https://arxiv.org/pdf/2012.05940">pdf</a>, <a href="https://arxiv.org/format/2012.05940">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> A Simplistic Machine Learning Approach to Contact Tracing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Carlos G贸mez</a>, <a href="/search/cs?searchtype=author&query=Belton%2C+N">Niamh Belton</a>, <a href="/search/cs?searchtype=author&query=Quach%2C+B">Boi Quach</a>, <a href="/search/cs?searchtype=author&query=Nicholls%2C+J">Jack Nicholls</a>, <a href="/search/cs?searchtype=author&query=Anand%2C+D">Devanshu Anand</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.05940v1-abstract-short" style="display: inline;"> This report is based on the modified NIST challenge, Too Close For Too Long, provided by the SFI Centre for Machine Learning (ML-Labs). The modified challenge excludes the time calculation (too long) aspect. By handcrafting features from phone instrumental data we develop two machine learning models, a GBM and an MLP, to estimate distance between two phones. Our method is able to outperform the le… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.05940v1-abstract-full').style.display = 'inline'; document.getElementById('2012.05940v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.05940v1-abstract-full" style="display: none;"> This report is based on the modified NIST challenge, Too Close For Too Long, provided by the SFI Centre for Machine Learning (ML-Labs). The modified challenge excludes the time calculation (too long) aspect. By handcrafting features from phone instrumental data we develop two machine learning models, a GBM and an MLP, to estimate distance between two phones. Our method is able to outperform the leading NIST challenge result by the Hong Kong University of Science and Technology (HKUST) by a significant margin. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.05940v1-abstract-full').style.display = 'none'; document.getElementById('2012.05940v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 7 tables, 2 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.05413">arXiv:2008.05413</a> <span> [<a href="https://arxiv.org/pdf/2008.05413">pdf</a>, <a href="https://arxiv.org/format/2008.05413">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Look here! A parametric learning based approach to redirect visual attention </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mejjati%2C+Y+A">Youssef Alami Mejjati</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C+F">Celso F. Gomez</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+K+I">Kwang In Kim</a>, <a href="/search/cs?searchtype=author&query=Shechtman%2C+E">Eli Shechtman</a>, <a href="/search/cs?searchtype=author&query=Bylinskii%2C+Z">Zoya Bylinskii</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.05413v1-abstract-short" style="display: inline;"> Across photography, marketing, and website design, being able to direct the viewer's attention is a powerful tool. Motivated by professional workflows, we introduce an automatic method to make an image region more attention-capturing via subtle image edits that maintain realism and fidelity to the original. From an input image and a user-provided mask, our GazeShiftNet model predicts a distinct se… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.05413v1-abstract-full').style.display = 'inline'; document.getElementById('2008.05413v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.05413v1-abstract-full" style="display: none;"> Across photography, marketing, and website design, being able to direct the viewer's attention is a powerful tool. Motivated by professional workflows, we introduce an automatic method to make an image region more attention-capturing via subtle image edits that maintain realism and fidelity to the original. From an input image and a user-provided mask, our GazeShiftNet model predicts a distinct set of global parametric transformations to be applied to the foreground and background image regions separately. We present the results of quantitative and qualitative experiments that demonstrate improvements over prior state-of-the-art. In contrast to existing attention shifting algorithms, our global parametric approach better preserves image semantics and avoids typical generative artifacts. Our edits enable inference at interactive rates on any image size, and easily generalize to videos. Extensions of our model allow for multi-style edits and the ability to both increase and attenuate attention in an image region. Furthermore, users can customize the edited images by dialing the edits up or down via interpolations in parameter space. This paper presents a practical tool that can simplify future image editing pipelines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.05413v1-abstract-full').style.display = 'none'; document.getElementById('2008.05413v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in ECCV 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2006.13163">arXiv:2006.13163</a> <span> [<a href="https://arxiv.org/pdf/2006.13163">pdf</a>, <a href="https://arxiv.org/format/2006.13163">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.3847/1538-4365/aba267">10.3847/1538-4365/aba267 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> MANTRA: A Machine Learning reference lightcurve dataset for astronomical transient event recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Neira%2C+M">Mauricio Neira</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Catalina G贸mez</a>, <a href="/search/cs?searchtype=author&query=Su%C3%A1rez-P%C3%A9rez%2C+J+F">John F. Su谩rez-P茅rez</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+D+A">Diego A. G贸mez</a>, <a href="/search/cs?searchtype=author&query=Reyes%2C+J+P">Juan Pablo Reyes</a>, <a href="/search/cs?searchtype=author&query=Hoyos%2C+M+H">Marcela Hern谩ndez Hoyos</a>, <a href="/search/cs?searchtype=author&query=Arbel%C3%A1ez%2C+P">Pablo Arbel谩ez</a>, <a href="/search/cs?searchtype=author&query=Forero-Romero%2C+J+E">Jaime E. Forero-Romero</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2006.13163v2-abstract-short" style="display: inline;"> We introduce MANTRA, an annotated dataset of 4869 transient and 71207 non-transient object lightcurves built from the Catalina Real Time Transient Survey. We provide public access to this dataset as a plain text file to facilitate standardized quantitative comparison of astronomical transient event recognition algorithms. Some of the classes included in the dataset are: supernovae, cataclysmic var… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.13163v2-abstract-full').style.display = 'inline'; document.getElementById('2006.13163v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2006.13163v2-abstract-full" style="display: none;"> We introduce MANTRA, an annotated dataset of 4869 transient and 71207 non-transient object lightcurves built from the Catalina Real Time Transient Survey. We provide public access to this dataset as a plain text file to facilitate standardized quantitative comparison of astronomical transient event recognition algorithms. Some of the classes included in the dataset are: supernovae, cataclysmic variables, active galactic nuclei, high proper motion stars, blazars and flares. As an example of the tasks that can be performed on the dataset we experiment with multiple data pre-processing methods, feature selection techniques and popular machine learning algorithms (Support Vector Machines, Random Forests and Neural Networks). We assess quantitative performance in two classification tasks: binary (transient/non-transient) and eight-class classification. The best performing algorithm in both tasks is the Random Forest Classifier. It achieves an F1-score of 96.25% in the binary classification and 52.79% in the eight-class classification. For the eight-class classification, non-transients ( 96.83% ) is the class with the highest F1-score, while the lowest corresponds to high-proper-motion stars ( 16.79% ); for supernovae it achieves a value of 54.57% , close to the average across classes. The next release of MANTRA includes images and benchmarks with deep learning models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.13163v2-abstract-full').style.display = 'none'; document.getElementById('2006.13163v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ApJS accepted, 17 pages, 14 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.13877">arXiv:2004.13877</a> <span> [<a href="https://arxiv.org/pdf/2004.13877">pdf</a>, <a href="https://arxiv.org/format/2004.13877">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Methods for Astrophysics">astro-ph.IM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/mnras/staa2973">10.1093/mnras/staa2973 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Classifying Image Sequences of Astronomical Transients with Deep Neural Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Catalina G贸mez</a>, <a href="/search/cs?searchtype=author&query=Neira%2C+M">Mauricio Neira</a>, <a href="/search/cs?searchtype=author&query=Hoyos%2C+M+H">Marcela Hern谩ndez Hoyos</a>, <a href="/search/cs?searchtype=author&query=Arbel%C3%A1ez%2C+P">Pablo Arbel谩ez</a>, <a href="/search/cs?searchtype=author&query=Forero-Romero%2C+J+E">Jaime E. Forero-Romero</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.13877v2-abstract-short" style="display: inline;"> Supervised classification of temporal sequences of astronomical images into meaningful transient astrophysical phenomena has been considered a hard problem because it requires the intervention of human experts. The classifier uses the expert's knowledge to find heuristic features to process the images, for instance, by performing image subtraction or by extracting sparse information such as flux t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.13877v2-abstract-full').style.display = 'inline'; document.getElementById('2004.13877v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.13877v2-abstract-full" style="display: none;"> Supervised classification of temporal sequences of astronomical images into meaningful transient astrophysical phenomena has been considered a hard problem because it requires the intervention of human experts. The classifier uses the expert's knowledge to find heuristic features to process the images, for instance, by performing image subtraction or by extracting sparse information such as flux time series, also known as light curves. We present a successful deep learning approach that learns directly from imaging data. Our method models explicitly the spatio-temporal patterns with Deep Convolutional Neural Networks and Gated Recurrent Units. We train these deep neural networks using 1.3 million real astronomical images from the Catalina Real-Time Transient Survey to classify the sequences into five different types of astronomical transient classes. The TAO-Net (for Transient Astronomical Objects Network) architecture outperforms the results from random forest classification on light curves by 10 percentage points as measured by the F1 score for each class; the average F1 over classes goes from $45\%$ with random forest classification to $55\%$ with TAO-Net. This achievement with TAO-Net opens the possibility to develop new deep learning architectures for early transient detection. We make available the training dataset and trained models of TAO-Net to allow for future extensions of this work. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.13877v2-abstract-full').style.display = 'none'; document.getElementById('2004.13877v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 7 figures. MNRAS accepted</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.08386">arXiv:1909.08386</a> <span> [<a href="https://arxiv.org/pdf/1909.08386">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Intelligent Active Queue Management Using Explicit Congestion Notification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gomez%2C+C+A">Cesar A. Gomez</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+X">Xianbin Wang</a>, <a href="/search/cs?searchtype=author&query=Shami%2C+A">Abdallah Shami</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.08386v1-abstract-short" style="display: inline;"> As more end devices are getting connected, the Internet will become more congested. Various congestion control techniques have been developed either on transport or network layers. Active Queue Management (AQM) is a paradigm that aims to mitigate the congestion on the network layer through active buffer control to avoid overflow. However, finding the right parameters for an AQM scheme is challengi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.08386v1-abstract-full').style.display = 'inline'; document.getElementById('1909.08386v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.08386v1-abstract-full" style="display: none;"> As more end devices are getting connected, the Internet will become more congested. Various congestion control techniques have been developed either on transport or network layers. Active Queue Management (AQM) is a paradigm that aims to mitigate the congestion on the network layer through active buffer control to avoid overflow. However, finding the right parameters for an AQM scheme is challenging, due to the complexity and dynamics of the networks. On the other hand, the Explicit Congestion Notification (ECN) mechanism is a solution that makes visible incipient congestion on the network layer to the transport layer. In this work, we propose to exploit the ECN information to improve AQM algorithms by applying Machine Learning techniques. Our intelligent method uses an artificial neural network to predict congestion and an AQM parameter tuner based on reinforcement learning. The evaluation results show that our solution can enhance the performance of deployed AQM, using the existing TCP congestion control mechanisms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.08386v1-abstract-full').style.display = 'none'; document.getElementById('1909.08386v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To be presented at the IEEE Global Communications Conference -GLOBECOM- 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.08414">arXiv:1811.08414</a> <span> [<a href="https://arxiv.org/pdf/1811.08414">pdf</a>, <a href="https://arxiv.org/format/1811.08414">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-27544-0_3">10.1007/978-3-030-27544-0_3 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Visual SLAM-based Localization and Navigation for Service Robots: The Pepper Case </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Cristopher G贸mez</a>, <a href="/search/cs?searchtype=author&query=Mattamala%2C+M">Mat铆as Mattamala</a>, <a href="/search/cs?searchtype=author&query=Resink%2C+T">Tim Resink</a>, <a href="/search/cs?searchtype=author&query=Ruiz-del-Solar%2C+J">Javier Ruiz-del-Solar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.08414v1-abstract-short" style="display: inline;"> We propose a Visual-SLAM based localization and navigation system for service robots. Our system is built on top of the ORB-SLAM monocular system but extended by the inclusion of wheel odometry in the estimation procedures. As a case study, the proposed system is validated using the Pepper robot, whose short-range LIDARs and RGB-D camera do not allow the robot to self-localize in large environment… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08414v1-abstract-full').style.display = 'inline'; document.getElementById('1811.08414v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.08414v1-abstract-full" style="display: none;"> We propose a Visual-SLAM based localization and navigation system for service robots. Our system is built on top of the ORB-SLAM monocular system but extended by the inclusion of wheel odometry in the estimation procedures. As a case study, the proposed system is validated using the Pepper robot, whose short-range LIDARs and RGB-D camera do not allow the robot to self-localize in large environments. The localization system is tested in navigation tasks using Pepper in two different environments: a medium-size laboratory, and a large-size hall. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08414v1-abstract-full').style.display = 'none'; document.getElementById('1811.08414v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 6 figures. Presented in RoboCup Symposium 2018. Final version will appear in Springer</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.08352">arXiv:1811.08352</a> <span> [<a href="https://arxiv.org/pdf/1811.08352">pdf</a>, <a href="https://arxiv.org/format/1811.08352">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Near Real-Time Object Recognition for Pepper based on Deep Neural Networks Running on a Backpack </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Reyes%2C+E">Esteban Reyes</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Cristopher G贸mez</a>, <a href="/search/cs?searchtype=author&query=Norambuena%2C+E">Esteban Norambuena</a>, <a href="/search/cs?searchtype=author&query=Ruiz-del-Solar%2C+J">Javier Ruiz-del-Solar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.08352v1-abstract-short" style="display: inline;"> The main goal of the paper is to provide Pepper with a near real-time object recognition system based on deep neural networks. The proposed system is based on YOLO (You Only Look Once), a deep neural network that is able to detect and recognize objects robustly and at a high speed. In addition, considering that YOLO cannot be run in the Pepper's internal computer in near real-time, we propose to u… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08352v1-abstract-full').style.display = 'inline'; document.getElementById('1811.08352v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.08352v1-abstract-full" style="display: none;"> The main goal of the paper is to provide Pepper with a near real-time object recognition system based on deep neural networks. The proposed system is based on YOLO (You Only Look Once), a deep neural network that is able to detect and recognize objects robustly and at a high speed. In addition, considering that YOLO cannot be run in the Pepper's internal computer in near real-time, we propose to use a Backpack for Pepper, which holds a Jetson TK1 card and a battery. By using this card, Pepper is able to robustly detect and recognize objects in images of 320x320 pixels at about 5 frames per second. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.08352v1-abstract-full').style.display = 'none'; document.getElementById('1811.08352v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Proceedings of 22th RoboCup International Symposium, Montreal, Canada, 2018</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of 22th RoboCup International Symposium, Montreal, Canada, 2018 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1801.02564">arXiv:1801.02564</a> <span> [<a href="https://arxiv.org/pdf/1801.02564">pdf</a>, <a href="https://arxiv.org/ps/1801.02564">ps</a>, <a href="https://arxiv.org/format/1801.02564">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Functional Analysis">math.FA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Number Theory">math.NT</span> </div> </div> <p class="title is-5 mathjax"> Sampling Almost Periodic and related Functions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ferri%2C+S">Stefano Ferri</a>, <a href="/search/cs?searchtype=author&query=Galindo%2C+J">Jorge Galindo</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez%2C+C">Camilo G贸mez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1801.02564v2-abstract-short" style="display: inline;"> We consider certain finite sets of circle-valued functions defined on intervals of real numbers and estimate how large the intervals must be for the values of these functions to be uniformly distributed in an approximate way. This is used to establish some general conditions under which a random construction introduced by Katznelson for the integers yields sets that are dense in the Bohr group.… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1801.02564v2-abstract-full').style.display = 'inline'; document.getElementById('1801.02564v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1801.02564v2-abstract-full" style="display: none;"> We consider certain finite sets of circle-valued functions defined on intervals of real numbers and estimate how large the intervals must be for the values of these functions to be uniformly distributed in an approximate way. This is used to establish some general conditions under which a random construction introduced by Katznelson for the integers yields sets that are dense in the Bohr group. We obtain in this way very sparse sets of real numbers (and of integers) on which two different almost periodic functions cannot agree, what makes them amenable to be used in sampling theorems for these functions. These sets can be made as sparse as to have zero asymptotic density or as to be t-sets, i.e., to be sets that intersect any of their translates in a bounded set. Many of these results are proved not only for almost periodic functions but also for classes of functions generated by more general complex exponential functions, including chirps. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1801.02564v2-abstract-full').style.display = 'none'; document.getElementById('1801.02564v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 December, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 January, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 43A60; 11K70; 42A75; 94A20 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/0911.2174">arXiv:0911.2174</a> <span> [<a href="https://arxiv.org/pdf/0911.2174">pdf</a>, <a href="https://arxiv.org/format/0911.2174">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="High Energy Physics - Lattice">hep-lat</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> QPACE -- a QCD parallel computer based on Cell processors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Baier%2C+H">H. Baier</a>, <a href="/search/cs?searchtype=author&query=Boettiger%2C+H">H. Boettiger</a>, <a href="/search/cs?searchtype=author&query=Drochner%2C+M">M. Drochner</a>, <a href="/search/cs?searchtype=author&query=Eicker%2C+N">N. Eicker</a>, <a href="/search/cs?searchtype=author&query=Fischer%2C+U">U. Fischer</a>, <a href="/search/cs?searchtype=author&query=Fodor%2C+Z">Z. Fodor</a>, <a href="/search/cs?searchtype=author&query=Frommer%2C+A">A. Frommer</a>, <a href="/search/cs?searchtype=author&query=Gomez%2C+C">C. Gomez</a>, <a href="/search/cs?searchtype=author&query=Goldrian%2C+G">G. Goldrian</a>, <a href="/search/cs?searchtype=author&query=Heybrock%2C+S">S. Heybrock</a>, <a href="/search/cs?searchtype=author&query=Hierl%2C+D">D. Hierl</a>, <a href="/search/cs?searchtype=author&query=H%C3%BCsken%2C+M">M. H眉sken</a>, <a href="/search/cs?searchtype=author&query=Huth%2C+T">T. Huth</a>, <a href="/search/cs?searchtype=author&query=Krill%2C+B">B. Krill</a>, <a href="/search/cs?searchtype=author&query=Lauritsen%2C+J">J. Lauritsen</a>, <a href="/search/cs?searchtype=author&query=Lippert%2C+T">T. Lippert</a>, <a href="/search/cs?searchtype=author&query=Maurer%2C+T">T. Maurer</a>, <a href="/search/cs?searchtype=author&query=Mendl%2C+B">B. Mendl</a>, <a href="/search/cs?searchtype=author&query=Meyer%2C+N">N. Meyer</a>, <a href="/search/cs?searchtype=author&query=Nobile%2C+A">A. Nobile</a>, <a href="/search/cs?searchtype=author&query=Ouda%2C+I">I. Ouda</a>, <a href="/search/cs?searchtype=author&query=Pivanti%2C+M">M. Pivanti</a>, <a href="/search/cs?searchtype=author&query=Pleiter%2C+D">D. Pleiter</a>, <a href="/search/cs?searchtype=author&query=Ries%2C+M">M. Ries</a>, <a href="/search/cs?searchtype=author&query=Sch%C3%A4fer%2C+A">A. Sch盲fer</a> , et al. (10 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="0911.2174v3-abstract-short" style="display: inline;"> QPACE is a novel parallel computer which has been developed to be primarily used for lattice QCD simulations. The compute power is provided by the IBM PowerXCell 8i processor, an enhanced version of the Cell processor that is used in the Playstation 3. The QPACE nodes are interconnected by a custom, application optimized 3-dimensional torus network implemented on an FPGA. To achieve the very hig… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0911.2174v3-abstract-full').style.display = 'inline'; document.getElementById('0911.2174v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="0911.2174v3-abstract-full" style="display: none;"> QPACE is a novel parallel computer which has been developed to be primarily used for lattice QCD simulations. The compute power is provided by the IBM PowerXCell 8i processor, an enhanced version of the Cell processor that is used in the Playstation 3. The QPACE nodes are interconnected by a custom, application optimized 3-dimensional torus network implemented on an FPGA. To achieve the very high packaging density of 26 TFlops per rack a new water cooling concept has been developed and successfully realized. In this paper we give an overview of the architecture and highlight some important technical details of the system. Furthermore, we provide initial performance results and report on the installation of 8 QPACE racks providing an aggregate peak performance of 200 TFlops. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0911.2174v3-abstract-full').style.display = 'none'; document.getElementById('0911.2174v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 December, 2009; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 November, 2009; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2009. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">21 pages. Poster by T. Maurer and plenary talk by D. Pleiter presented at the "XXVII International Symposium on Lattice Field Theory", July 26-31 2009, Peking University, Beijing, China. Information on recent Green500 ranking added and list of authors extended</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> PoS LAT2009:001,2009 </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>