<!-- Capture preamble preserved as a comment: stray text before the doctype
     is invalid HTML and forces quirks-mode parsing. Original preamble lines:
     CINXE.COM
     Search | arXiv e-print repository
-->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;44 of 44 results for author: <span class="mathjax">Cohen, G</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Cohen%2C+G">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Cohen, G"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Cohen%2C+G&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Cohen, G"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.11233">arXiv:2411.11233</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.11233">pdf</a>, <a href="https://arxiv.org/format/2411.11233">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Noise Filtering Benchmark for Neuromorphic Satellites Observations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Arja%2C+S">Sami Arja</a>, <a href="/search/cs?searchtype=author&amp;query=Marcireau%2C+A">Alexandre Marcireau</a>, <a href="/search/cs?searchtype=author&amp;query=Ralph%2C+N+O">Nicholas Owen Ralph</a>, <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.11233v1-abstract-short" 
style="display: inline;"> Event cameras capture sparse, asynchronous brightness changes which offer high temporal resolution, high dynamic range, low power consumption, and sparse data output. These advantages make them ideal for Space Situational Awareness, particularly in detecting resident space objects moving within a telescope&#39;s field of view. However, the output from event cameras often includes substantial backgroun&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11233v1-abstract-full').style.display = 'inline'; document.getElementById('2411.11233v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.11233v1-abstract-full" style="display: none;"> Event cameras capture sparse, asynchronous brightness changes which offer high temporal resolution, high dynamic range, low power consumption, and sparse data output. These advantages make them ideal for Space Situational Awareness, particularly in detecting resident space objects moving within a telescope&#39;s field of view. However, the output from event cameras often includes substantial background activity noise, which is known to be more prevalent in low-light conditions. This noise can overwhelm the sparse events generated by satellite signals, making detection and tracking more challenging. Existing noise-filtering algorithms struggle in these scenarios because they are typically designed for denser scenes, where losing some signal is acceptable. This limitation hinders the application of event cameras in complex, real-world environments where signals are extremely sparse. In this paper, we propose new event-driven noise-filtering algorithms specifically designed for very sparse scenes. 
We categorise the algorithms into logical-based and learning-based approaches and benchmark their performance against 11 state-of-the-art noise-filtering algorithms, evaluating how effectively they remove noise and hot pixels while preserving the signal. Their performance was quantified by measuring signal retention and noise removal accuracy, with results reported using ROC curves across the parameter space. Additionally, we introduce a new high-resolution satellite dataset with ground truth from a real-world platform under various noise conditions, which we have made publicly available. Code, dataset, and trained weights are available at \url{https://github.com/samiarja/dvs_sparse_filter}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.11233v1-abstract-full').style.display = 'none'; document.getElementById('2411.11233v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 8 figures, 1 table</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.12793">arXiv:2410.12793</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.12793">pdf</a>, <a href="https://arxiv.org/format/2410.12793">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Environment Scan of Generative AI Infrastructure for Clinical and Translational Science </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Idnay%2C+B">Betina Idnay</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Z">Zihan Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Adams%2C+W+G">William G. Adams</a>, <a href="/search/cs?searchtype=author&amp;query=Adibuzzaman%2C+M">Mohammad Adibuzzaman</a>, <a href="/search/cs?searchtype=author&amp;query=Anderson%2C+N+R">Nicholas R. Anderson</a>, <a href="/search/cs?searchtype=author&amp;query=Bahroos%2C+N">Neil Bahroos</a>, <a href="/search/cs?searchtype=author&amp;query=Bell%2C+D+S">Douglas S. Bell</a>, <a href="/search/cs?searchtype=author&amp;query=Bumgardner%2C+C">Cody Bumgardner</a>, <a href="/search/cs?searchtype=author&amp;query=Campion%2C+T">Thomas Campion</a>, <a href="/search/cs?searchtype=author&amp;query=Castro%2C+M">Mario Castro</a>, <a href="/search/cs?searchtype=author&amp;query=Cimino%2C+J+J">James J. 
Cimino</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+I+G">I. Glenn Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Dorr%2C+D">David Dorr</a>, <a href="/search/cs?searchtype=author&amp;query=Elkin%2C+P+L">Peter L Elkin</a>, <a href="/search/cs?searchtype=author&amp;query=Fan%2C+J+W">Jungwei W. Fan</a>, <a href="/search/cs?searchtype=author&amp;query=Ferris%2C+T">Todd Ferris</a>, <a href="/search/cs?searchtype=author&amp;query=Foran%2C+D+J">David J. Foran</a>, <a href="/search/cs?searchtype=author&amp;query=Hanauer%2C+D">David Hanauer</a>, <a href="/search/cs?searchtype=author&amp;query=Hogarth%2C+M">Mike Hogarth</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+K">Kun Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Kalpathy-Cramer%2C+J">Jayashree Kalpathy-Cramer</a>, <a href="/search/cs?searchtype=author&amp;query=Kandpal%2C+M">Manoj Kandpal</a>, <a href="/search/cs?searchtype=author&amp;query=Karnik%2C+N+S">Niranjan S. Karnik</a>, <a href="/search/cs?searchtype=author&amp;query=Katoch%2C+A">Avnish Katoch</a>, <a href="/search/cs?searchtype=author&amp;query=Lai%2C+A+M">Albert M. Lai</a> , et al. (32 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.12793v1-abstract-short" style="display: inline;"> This study reports a comprehensive environmental scan of the generative AI (GenAI) infrastructure in the national network for clinical and translational science across 36 institutions supported by the Clinical and Translational Science Award (CTSA) Program led by the National Center for Advancing Translational Sciences (NCATS) of the National Institutes of Health (NIH) at the United States. 
With t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12793v1-abstract-full').style.display = 'inline'; document.getElementById('2410.12793v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.12793v1-abstract-full" style="display: none;"> This study reports a comprehensive environmental scan of the generative AI (GenAI) infrastructure in the national network for clinical and translational science across 36 institutions supported by the Clinical and Translational Science Award (CTSA) Program led by the National Center for Advancing Translational Sciences (NCATS) of the National Institutes of Health (NIH) at the United States. With the rapid advancement of GenAI technologies, including large language models (LLMs), healthcare institutions face unprecedented opportunities and challenges. This research explores the current status of GenAI integration, focusing on stakeholder roles, governance structures, and ethical considerations by administering a survey among leaders of health institutions (i.e., representing academic medical centers and health systems) to assess the institutional readiness and approach towards GenAI adoption. Key findings indicate a diverse range of institutional strategies, with most organizations in the experimental phase of GenAI deployment. The study highlights significant variations in governance models, with a strong preference for centralized decision-making but notable gaps in workforce training and ethical oversight. Moreover, the results underscore the need for a more coordinated approach to GenAI governance, emphasizing collaboration among senior leaders, clinicians, information technology staff, and researchers. Our analysis also reveals concerns regarding GenAI bias, data security, and stakeholder trust, which must be addressed to ensure the ethical and effective implementation of GenAI technologies. 
This study offers valuable insights into the challenges and opportunities of GenAI integration in healthcare, providing a roadmap for institutions aiming to leverage GenAI for improved quality of care and operational efficiency. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.12793v1-abstract-full').style.display = 'none'; document.getElementById('2410.12793v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.20370">arXiv:2409.20370</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.20370">pdf</a>, <a href="https://arxiv.org/format/2409.20370">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> The Perfect Blend: Redefining RLHF with Mixture of Judges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Xu%2C+T">Tengyu Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Helenowski%2C+E">Eryk Helenowski</a>, <a href="/search/cs?searchtype=author&amp;query=Sankararaman%2C+K+A">Karthik Abinav Sankararaman</a>, <a href="/search/cs?searchtype=author&amp;query=Jin%2C+D">Di Jin</a>, <a href="/search/cs?searchtype=author&amp;query=Peng%2C+K">Kaiyan Peng</a>, <a href="/search/cs?searchtype=author&amp;query=Han%2C+E">Eric 
Han</a>, <a href="/search/cs?searchtype=author&amp;query=Nie%2C+S">Shaoliang Nie</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+C">Chen Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hejia Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+W">Wenxuan Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Zeng%2C+Z">Zhouhao Zeng</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+Y">Yun He</a>, <a href="/search/cs?searchtype=author&amp;query=Mandyam%2C+K">Karishma Mandyam</a>, <a href="/search/cs?searchtype=author&amp;query=Talabzadeh%2C+A">Arya Talabzadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Khabsa%2C+M">Madian Khabsa</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gabriel Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Tian%2C+Y">Yuandong Tian</a>, <a href="/search/cs?searchtype=author&amp;query=Ma%2C+H">Hao Ma</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Sinong Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Fang%2C+H">Han Fang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.20370v1-abstract-short" style="display: inline;"> Reinforcement learning from human feedback (RLHF) has become the leading approach for fine-tuning large language models (LLM). However, RLHF has limitations in multi-task learning (MTL) due to challenges of reward hacking and extreme multi-objective optimization (i.e., trade-off of multiple and/or sometimes conflicting objectives). 
Applying RLHF for MTL currently requires careful tuning of the wei&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20370v1-abstract-full').style.display = 'inline'; document.getElementById('2409.20370v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.20370v1-abstract-full" style="display: none;"> Reinforcement learning from human feedback (RLHF) has become the leading approach for fine-tuning large language models (LLM). However, RLHF has limitations in multi-task learning (MTL) due to challenges of reward hacking and extreme multi-objective optimization (i.e., trade-off of multiple and/or sometimes conflicting objectives). Applying RLHF for MTL currently requires careful tuning of the weights for reward model and data combinations. This is often done via human intuition and does not generalize. In this work, we introduce a novel post-training paradigm which we called Constrained Generative Policy Optimization (CGPO). The core of CGPO is Mixture of Judges (MoJ) with cost-efficient constrained policy optimization with stratification, which can identify the perfect blend in RLHF in a principled manner. It shows strong empirical results with theoretical guarantees, does not require extensive hyper-parameter tuning, and is plug-and-play in common post-training pipelines. Together, this can detect and mitigate reward hacking behaviors while reaching a pareto-optimal point across an extremely large number of objectives. Our empirical evaluations demonstrate that CGPO significantly outperforms standard RLHF algorithms like PPO and DPO across various tasks including general chat, STEM questions, instruction following, and coding. Specifically, CGPO shows improvements of 7.4% in AlpacaEval-2 (general chat), 12.5% in Arena-Hard (STEM &amp; reasoning), and consistent gains in other domains like math and coding. 
Notably, PPO, while commonly used, is prone to severe reward hacking in popular coding benchmarks, which CGPO successfully addresses. This breakthrough in RLHF not only tackles reward hacking and extreme multi-objective optimization challenges but also advances the state-of-the-art in aligning general-purpose LLMs for diverse applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.20370v1-abstract-full').style.display = 'none'; document.getElementById('2409.20370v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.12570">arXiv:2408.12570</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.12570">pdf</a>, <a href="https://arxiv.org/format/2408.12570">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Jamba-1.5: Hybrid Transformer-Mamba Models at Scale </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jamba+Team"> Jamba Team</a>, <a href="/search/cs?searchtype=author&amp;query=Lenz%2C+B">Barak Lenz</a>, <a href="/search/cs?searchtype=author&amp;query=Arazi%2C+A">Alan Arazi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Bergman%2C+A">Amir Bergman</a>, <a href="/search/cs?searchtype=author&amp;query=Manevich%2C+A">Avshalom Manevich</a>, <a href="/search/cs?searchtype=author&amp;query=Peleg%2C+B">Barak Peleg</a>, <a href="/search/cs?searchtype=author&amp;query=Aviram%2C+B">Ben Aviram</a>, <a href="/search/cs?searchtype=author&amp;query=Almagor%2C+C">Chen Almagor</a>, <a href="/search/cs?searchtype=author&amp;query=Fridman%2C+C">Clara Fridman</a>, <a href="/search/cs?searchtype=author&amp;query=Padnos%2C+D">Dan Padnos</a>, <a href="/search/cs?searchtype=author&amp;query=Gissin%2C+D">Daniel Gissin</a>, <a href="/search/cs?searchtype=author&amp;query=Jannai%2C+D">Daniel Jannai</a>, <a href="/search/cs?searchtype=author&amp;query=Muhlgay%2C+D">Dor Muhlgay</a>, <a href="/search/cs?searchtype=author&amp;query=Zimberg%2C+D">Dor Zimberg</a>, <a href="/search/cs?searchtype=author&amp;query=Gerber%2C+E+M">Edden M Gerber</a>, <a href="/search/cs?searchtype=author&amp;query=Dolev%2C+E">Elad Dolev</a>, <a href="/search/cs?searchtype=author&amp;query=Krakovsky%2C+E">Eran Krakovsky</a>, <a href="/search/cs?searchtype=author&amp;query=Safahi%2C+E">Erez Safahi</a>, <a href="/search/cs?searchtype=author&amp;query=Schwartz%2C+E">Erez Schwartz</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gal Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Shachaf%2C+G">Gal Shachaf</a>, <a href="/search/cs?searchtype=author&amp;query=Rozenblum%2C+H">Haim Rozenblum</a>, <a href="/search/cs?searchtype=author&amp;query=Bata%2C+H">Hofit Bata</a>, <a href="/search/cs?searchtype=author&amp;query=Blass%2C+I">Ido Blass</a>, <a href="/search/cs?searchtype=author&amp;query=Magar%2C+I">Inbal Magar</a> , et al. 
(36 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.12570v1-abstract-short" style="display: inline;"> We present Jamba-1.5, new instruction-tuned large language models based on our Jamba architecture. Jamba is a hybrid Transformer-Mamba mixture of experts architecture, providing high throughput and low memory usage across context lengths, while retaining the same or better quality as Transformer models. We release two model sizes: Jamba-1.5-Large, with 94B active parameters, and Jamba-1.5-Mini, wi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12570v1-abstract-full').style.display = 'inline'; document.getElementById('2408.12570v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.12570v1-abstract-full" style="display: none;"> We present Jamba-1.5, new instruction-tuned large language models based on our Jamba architecture. Jamba is a hybrid Transformer-Mamba mixture of experts architecture, providing high throughput and low memory usage across context lengths, while retaining the same or better quality as Transformer models. We release two model sizes: Jamba-1.5-Large, with 94B active parameters, and Jamba-1.5-Mini, with 12B active parameters. Both models are fine-tuned for a variety of conversational and instruction-following capabilties, and have an effective context length of 256K tokens, the largest amongst open-weight models. To support cost-effective inference, we introduce ExpertsInt8, a novel quantization technique that allows fitting Jamba-1.5-Large on a machine with 8 80GB GPUs when processing 256K-token contexts without loss of quality. 
When evaluated on a battery of academic and chatbot benchmarks, Jamba-1.5 models achieve excellent results while providing high throughput and outperforming other open-weight models on long-context benchmarks. The model weights for both sizes are publicly available under the Jamba Open Model License and we release ExpertsInt8 as open source. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.12570v1-abstract-full').style.display = 'none'; document.getElementById('2408.12570v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Webpage: https://www.ai21.com/jamba</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.15209">arXiv:2405.15209</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.15209">pdf</a>, <a href="https://arxiv.org/format/2405.15209">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Motion Segmentation for Neuromorphic Aerial Surveillance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Arja%2C+S">Sami Arja</a>, <a href="/search/cs?searchtype=author&amp;query=Marcireau%2C+A">Alexandre Marcireau</a>, <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Ramesh%2C+B">Bharath Ramesh</a>, <a 
href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.15209v2-abstract-short" style="display: inline;"> Aerial surveillance demands rapid and precise detection of moving objects in dynamic environments. Event cameras, which draw inspiration from biological vision systems, present a promising alternative to frame-based sensors due to their exceptional temporal resolution, superior dynamic range, and minimal power requirements. Unlike traditional frame-based sensors that capture redundant information&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.15209v2-abstract-full').style.display = 'inline'; document.getElementById('2405.15209v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.15209v2-abstract-full" style="display: none;"> Aerial surveillance demands rapid and precise detection of moving objects in dynamic environments. Event cameras, which draw inspiration from biological vision systems, present a promising alternative to frame-based sensors due to their exceptional temporal resolution, superior dynamic range, and minimal power requirements. Unlike traditional frame-based sensors that capture redundant information at fixed intervals, event cameras asynchronously record pixel-level brightness changes, providing a continuous and efficient data stream ideal for fast motion segmentation. While these sensors are ideal for fast motion segmentation, existing event-based motion segmentation methods often suffer from limitations such as the need for per-scene parameter tuning or reliance on manual labelling, hindering their scalability and practical deployment. 
In this paper, we address these challenges by introducing a novel motion segmentation method that leverages self-supervised vision transformers on both event data and optical flow information. Our approach eliminates the need for human annotations and reduces dependency on scene-specific parameters. In this paper, we used the EVK4-HD Prophesee event camera onboard a highly dynamic aerial platform in urban settings. We conduct extensive evaluations of our framework across multiple datasets, demonstrating state-of-the-art performance compared to existing benchmarks. Our method can effectively handle various types of motion and an arbitrary number of moving objects. Code and dataset are available at: \url{https://samiarja.github.io/evairborne/} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.15209v2-abstract-full').style.display = 'none'; document.getElementById('2405.15209v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 11 figures, 8 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19887">arXiv:2403.19887</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.19887">pdf</a>, <a href="https://arxiv.org/format/2403.19887">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Jamba: A Hybrid Transformer-Mamba Language Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lieber%2C+O">Opher Lieber</a>, <a href="/search/cs?searchtype=author&amp;query=Lenz%2C+B">Barak Lenz</a>, <a href="/search/cs?searchtype=author&amp;query=Bata%2C+H">Hofit Bata</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gal Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Osin%2C+J">Jhonathan Osin</a>, <a href="/search/cs?searchtype=author&amp;query=Dalmedigos%2C+I">Itay Dalmedigos</a>, <a href="/search/cs?searchtype=author&amp;query=Safahi%2C+E">Erez Safahi</a>, <a href="/search/cs?searchtype=author&amp;query=Meirom%2C+S">Shaked Meirom</a>, <a href="/search/cs?searchtype=author&amp;query=Belinkov%2C+Y">Yonatan Belinkov</a>, <a href="/search/cs?searchtype=author&amp;query=Shalev-Shwartz%2C+S">Shai Shalev-Shwartz</a>, <a href="/search/cs?searchtype=author&amp;query=Abend%2C+O">Omri Abend</a>, <a href="/search/cs?searchtype=author&amp;query=Alon%2C+R">Raz Alon</a>, <a href="/search/cs?searchtype=author&amp;query=Asida%2C+T">Tomer Asida</a>, <a 
href="/search/cs?searchtype=author&amp;query=Bergman%2C+A">Amir Bergman</a>, <a href="/search/cs?searchtype=author&amp;query=Glozman%2C+R">Roman Glozman</a>, <a href="/search/cs?searchtype=author&amp;query=Gokhman%2C+M">Michael Gokhman</a>, <a href="/search/cs?searchtype=author&amp;query=Manevich%2C+A">Avashalom Manevich</a>, <a href="/search/cs?searchtype=author&amp;query=Ratner%2C+N">Nir Ratner</a>, <a href="/search/cs?searchtype=author&amp;query=Rozen%2C+N">Noam Rozen</a>, <a href="/search/cs?searchtype=author&amp;query=Shwartz%2C+E">Erez Shwartz</a>, <a href="/search/cs?searchtype=author&amp;query=Zusman%2C+M">Mor Zusman</a>, <a href="/search/cs?searchtype=author&amp;query=Shoham%2C+Y">Yoav Shoham</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.19887v2-abstract-short" style="display: inline;"> We present Jamba, a new base large language model based on a novel hybrid Transformer-Mamba mixture-of-experts (MoE) architecture. Specifically, Jamba interleaves blocks of Transformer and Mamba layers, enjoying the benefits of both model families. MoE is added in some of these layers to increase model capacity while keeping active parameter usage manageable. This flexible architecture allows reso&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19887v2-abstract-full').style.display = 'inline'; document.getElementById('2403.19887v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.19887v2-abstract-full" style="display: none;"> We present Jamba, a new base large language model based on a novel hybrid Transformer-Mamba mixture-of-experts (MoE) architecture. Specifically, Jamba interleaves blocks of Transformer and Mamba layers, enjoying the benefits of both model families. 
MoE is added in some of these layers to increase model capacity while keeping active parameter usage manageable. This flexible architecture allows resource- and objective-specific configurations. In the particular configuration we have implemented, we end up with a powerful model that fits in a single 80GB GPU. Built at large scale, Jamba provides high throughput and small memory footprint compared to vanilla Transformers, and at the same time state-of-the-art performance on standard language model benchmarks and long-context evaluations. Remarkably, the model presents strong results for up to 256K tokens context length. We study various architectural decisions, such as how to combine Transformer and Mamba layers, and how to mix experts, and show that some of them are crucial in large scale modeling. We also describe several interesting properties of these architectures which the training and evaluation of Jamba have revealed, and plan to release checkpoints from various ablation runs, to encourage further exploration of this novel architecture. We make the weights of our implementation of Jamba publicly available under a permissive license. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19887v2-abstract-full').style.display = 'none'; document.getElementById('2403.19887v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Webpage: https://www.ai21.com/jamba</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.14125">arXiv:2304.14125</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.14125">pdf</a>, <a href="https://arxiv.org/format/2304.14125">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Density Invariant Contrast Maximization for Neuromorphic Earth Observations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Arja%2C+S">Sami Arja</a>, <a href="/search/cs?searchtype=author&amp;query=Marcireau%2C+A">Alexandre Marcireau</a>, <a href="/search/cs?searchtype=author&amp;query=Balthazor%2C+R+L">Richard L. Balthazor</a>, <a href="/search/cs?searchtype=author&amp;query=McHarg%2C+M+G">Matthew G. McHarg</a>, <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.14125v2-abstract-short" style="display: inline;"> Contrast maximization (CMax) techniques are widely used in event-based vision systems to estimate the motion parameters of the camera and generate high-contrast images. 
However, these techniques are noise-intolerant and suffer from the multiple extrema problem which arises when the scene contains more noisy events than structure, causing the contrast to be higher at multiple locations. This makes&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.14125v2-abstract-full').style.display = 'inline'; document.getElementById('2304.14125v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.14125v2-abstract-full" style="display: none;"> Contrast maximization (CMax) techniques are widely used in event-based vision systems to estimate the motion parameters of the camera and generate high-contrast images. However, these techniques are noise-intolerant and suffer from the multiple extrema problem which arises when the scene contains more noisy events than structure, causing the contrast to be higher at multiple locations. This makes the task of estimating the camera motion extremely challenging, which is a problem for neuromorphic earth observation, because, without a proper estimation of the motion parameters, it is not possible to generate a map with high contrast, causing important details to be lost. Similar methods that use CMax addressed this problem by changing or augmenting the objective function to enable it to converge to the correct motion parameters. Our proposed solution overcomes the multiple extrema and noise-intolerance problems by correcting the warped event before calculating the contrast and offers the following advantages: it does not depend on the event data, it does not require a prior about the camera motion, and keeps the rest of the CMax pipeline unchanged. This is to ensure that the contrast is only high around the correct motion parameters. 
Our approach enables the creation of better motion-compensated maps through an analytical compensation technique using a novel dataset from the International Space Station (ISS). Code is available at \url{https://github.com/neuromorphicsystems/event_warping} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.14125v2-abstract-full').style.display = 'none'; document.getElementById('2304.14125v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to 2023 CVPRW Workshop on Event-Based Vision</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.13680">arXiv:2205.13680</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.13680">pdf</a>, <a href="https://arxiv.org/format/2205.13680">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Membership Inference Attack Using Self Influence Functions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gilad Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Giryes%2C+R">Raja Giryes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.13680v1-abstract-short" style="display: inline;"> Member inference (MI) attacks aim to determine if a specific data sample was used to train a machine learning model. Thus, MI is a major privacy threat to models trained on private sensitive data, such as medical records. In MI attacks one may consider the black-box settings, where the model&#39;s parameters and activations are hidden from the adversary, or the white-box case where they are available&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.13680v1-abstract-full').style.display = 'inline'; document.getElementById('2205.13680v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.13680v1-abstract-full" style="display: none;"> Member inference (MI) attacks aim to determine if a specific data sample was used to train a machine learning model. Thus, MI is a major privacy threat to models trained on private sensitive data, such as medical records. In MI attacks one may consider the black-box settings, where the model&#39;s parameters and activations are hidden from the adversary, or the white-box case where they are available to the attacker. In this work, we focus on the latter and present a novel MI attack for it that employs influence functions, or more specifically the samples&#39; self-influence scores, to perform the MI prediction. We evaluate our attack on CIFAR-10, CIFAR-100, and Tiny ImageNet datasets, using versatile architectures such as AlexNet, ResNet, and DenseNet. Our attack method achieves new state-of-the-art results for both training with and without data augmentations. Code is available at https://github.com/giladcohen/sif_mi_attack. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.13680v1-abstract-full').style.display = 'none'; document.getElementById('2205.13680v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.00667">arXiv:2203.00667</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.00667">pdf</a>, <a href="https://arxiv.org/format/2203.00667">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Generative Adversarial Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gilad Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Giryes%2C+R">Raja Giryes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.00667v1-abstract-short" style="display: inline;"> Generative Adversarial Networks (GANs) are very popular frameworks for generating high-quality data, and are immensely used in both the academia and industry in many domains. Arguably, their most substantial impact has been in the area of computer vision, where they achieve state-of-the-art image generation. 
This chapter gives an introduction to GANs, by discussing their principle mechanism and pr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.00667v1-abstract-full').style.display = 'inline'; document.getElementById('2203.00667v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.00667v1-abstract-full" style="display: none;"> Generative Adversarial Networks (GANs) are very popular frameworks for generating high-quality data, and are immensely used in both the academia and industry in many domains. Arguably, their most substantial impact has been in the area of computer vision, where they achieve state-of-the-art image generation. This chapter gives an introduction to GANs, by discussing their principle mechanism and presenting some of their inherent problems during training and evaluation. We focus on these three issues: (1) mode collapse, (2) vanishing gradients, and (3) generation of low-quality images. We then list some architecture-variant and loss-variant GANs that remedy the above challenges. Lastly, we present two utilization examples of GANs for real-world applications: Data augmentation and face images generation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.00667v1-abstract-full').style.display = 'none'; document.getElementById('2203.00667v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.03404">arXiv:2111.03404</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2111.03404">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1371/journal.pone.0265691">10.1371/journal.pone.0265691 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A bone suppression model ensemble to improve COVID-19 detection in chest X-rays </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Rajaraman%2C+S">Sivaramakrishnan Rajaraman</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregg Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Spear%2C+L">Lillian Spear</a>, <a href="/search/cs?searchtype=author&amp;query=folio%2C+L">Les folio</a>, <a href="/search/cs?searchtype=author&amp;query=Antani%2C+S">Sameer Antani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.03404v2-abstract-short" style="display: inline;"> Chest X-ray (CXR) is a widely performed radiology examination that helps to detect abnormalities in the tissues and organs in the thoracic cavity. 
Detecting pulmonary abnormalities like COVID-19 may become difficult because they are obscured by the presence of bony structures like the ribs and the clavicles, thereby resulting in screening/diagnostic misinterpretations. Automated bone suppressi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.03404v2-abstract-full').style.display = 'inline'; document.getElementById('2111.03404v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.03404v2-abstract-full" style="display: none;"> Chest X-ray (CXR) is a widely performed radiology examination that helps to detect abnormalities in the tissues and organs in the thoracic cavity. Detecting pulmonary abnormalities like COVID-19 may become difficult because they are obscured by the presence of bony structures like the ribs and the clavicles, thereby resulting in screening/diagnostic misinterpretations. Automated bone suppression methods would help suppress these bony structures and increase soft tissue visibility. In this study, we propose to build an ensemble of convolutional neural network models to suppress bones in frontal CXRs, improve classification performance, and reduce interpretation errors related to COVID-19 detection. The ensemble is constructed by (i) measuring the multi-scale structural similarity index (MS-SSIM) score between the sub-blocks of the bone-suppressed image predicted by each of the top-3 performing bone-suppression models and the corresponding sub-blocks of its respective ground truth soft-tissue image, and (ii) performing a majority voting of the MS-SSIM score computed in each sub-block to identify the sub-block with the maximum MS-SSIM score and use it in constructing the final bone-suppressed image. We empirically determine the sub-block size that delivers superior bone suppression performance. 
It is observed that the bone suppression model ensemble outperformed the individual models in terms of MS-SSIM and other metrics. A CXR modality-specific classification model is retrained and evaluated on the non-bone-suppressed and bone-suppressed images to classify them as showing normal lungs or other COVID-19-like manifestations. We observed that the bone-suppressed model training significantly outperformed the model trained on non-bone-suppressed images toward detecting COVID-19 manifestations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.03404v2-abstract-full').style.display = 'none'; document.getElementById('2111.03404v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">29 pages, 10 figures, 4 tables</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.4 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.12813">arXiv:2109.12813</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.12813">pdf</a>, <a href="https://arxiv.org/format/2109.12813">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ACCESS.2022.3200699">10.1109/ACCESS.2022.3200699 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> An optimised deep spiking neural network architecture without gradients </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bethi%2C+Y">Yeshwanth Bethi</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Ying Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">Andre van Schaik</a>, <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2109.12813v3-abstract-short" style="display: inline;"> We present an end-to-end trainable modular event-driven neural architecture that uses local synaptic and threshold adaptation rules to perform transformations between arbitrary spatio-temporal spike patterns. The architecture represents a highly abstracted model of existing Spiking Neural Network (SNN) architectures. The proposed Optimized Deep Event-driven Spiking neural network Architecture (ODE&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.12813v3-abstract-full').style.display = 'inline'; document.getElementById('2109.12813v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.12813v3-abstract-full" style="display: none;"> We present an end-to-end trainable modular event-driven neural architecture that uses local synaptic and threshold adaptation rules to perform transformations between arbitrary spatio-temporal spike patterns. The architecture represents a highly abstracted model of existing Spiking Neural Network (SNN) architectures. The proposed Optimized Deep Event-driven Spiking neural network Architecture (ODESA) can simultaneously learn hierarchical spatio-temporal features at multiple arbitrary time scales. ODESA performs online learning without the use of error back-propagation or the calculation of gradients. Through the use of simple local adaptive selection thresholds at each node, the network rapidly learns to appropriately allocate its neuronal resources at each layer for any given problem without using a real-valued error measure. These adaptive selection thresholds are the central feature of ODESA, ensuring network stability and remarkable robustness to noise as well as to the selection of initial system parameters. Network activations are inherently sparse due to a hard Winner-Take-All (WTA) constraint at each layer. 
We evaluate the architecture on existing spatio-temporal datasets, including the spike-encoded IRIS and TIDIGITS datasets, as well as a novel set of tasks based on International Morse Code that we created. These tests demonstrate the hierarchical spatio-temporal learning capabilities of ODESA. Through these tests, we demonstrate ODESA can optimally solve practical and highly challenging hierarchical spatio-temporal learning tasks with the minimum possible number of computing nodes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.12813v3-abstract-full').style.display = 'none'; document.getElementById('2109.12813v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 6 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6; I.5.1 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> IEEE Access, 2022 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.08191">arXiv:2109.08191</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.08191">pdf</a>, <a href="https://arxiv.org/format/2109.08191">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Simple Post-Training Robustness Using Test Time Augmentations and Random Forest </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gilad Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Giryes%2C+R">Raja Giryes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.08191v2-abstract-short" style="display: inline;"> Although Deep Neural Networks (DNNs) achieve excellent performance on many real-world tasks, they are highly vulnerable to adversarial attacks. A leading defense against such attacks is adversarial training, a technique in which a DNN is trained to be robust to adversarial attacks by introducing adversarial noise to its input. 
This procedure is effective but must be done during the training phase.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.08191v2-abstract-full').style.display = 'inline'; document.getElementById('2109.08191v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.08191v2-abstract-full" style="display: none;"> Although Deep Neural Networks (DNNs) achieve excellent performance on many real-world tasks, they are highly vulnerable to adversarial attacks. A leading defense against such attacks is adversarial training, a technique in which a DNN is trained to be robust to adversarial attacks by introducing adversarial noise to its input. This procedure is effective but must be done during the training phase. In this work, we propose Augmented Random Forest (ARF), a simple and easy-to-use strategy for robustifying an existing pretrained DNN without modifying its weights. For every image, we generate randomized test time augmentations by applying diverse color, blur, noise, and geometric transforms. Then we use the DNN&#39;s logits output to train a simple random forest to predict the real class label. Our method achieves state-of-the-art adversarial robustness on a diversity of white and black box attacks with minimal compromise on the natural images&#39; classification. We test ARF also against numerous adaptive white-box attacks and it shows excellent results when combined with adversarial training. Code is available at https://github.com/giladcohen/ARF. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.08191v2-abstract-full').style.display = 'none'; document.getElementById('2109.08191v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.12829">arXiv:2102.12829</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2102.12829">pdf</a>, <a href="https://arxiv.org/format/2102.12829">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> Automatic Classification of OSA related Snoring Signals from Nocturnal Audio Recordings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sebastian%2C+A">Arun Sebastian</a>, <a href="/search/cs?searchtype=author&amp;query=Cistulli%2C+P+A">Peter A. 
Cistulli</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gary Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=de+Chazal%2C+P">Philip de Chazal</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.12829v2-abstract-short" style="display: inline;"> In this study, the development of an automatic algorithm is presented to classify the nocturnal audio recording of an obstructive sleep apnoea (OSA) patient as OSA related snore, simple snore and other sounds. Recent studies have shown that knowledge regarding the OSA related snore could assist in identifying the site of airway collapse. Audio signal was recorded simultaneously with full-night&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.12829v2-abstract-full').style.display = 'inline'; document.getElementById('2102.12829v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.12829v2-abstract-full" style="display: none;"> In this study, the development of an automatic algorithm is presented to classify the nocturnal audio recording of an obstructive sleep apnoea (OSA) patient as OSA related snore, simple snore and other sounds. Recent studies have shown that knowledge regarding the OSA related snore could assist in identifying the site of airway collapse. Audio signal was recorded simultaneously with full-night polysomnography during sleep with a ceiling microphone. Time and frequency features of the nocturnal audio signal were extracted to classify the audio signal into OSA related snore, simple snore and other sounds. 
Two algorithms were developed to extract OSA related snore using a linear discriminant analysis (LDA) classifier based on the hypothesis that OSA related snoring can assist in identifying the site-of-upper airway collapse. An unbiased nested leave-one-patient-out cross-validation process was used to select a high performing feature set from the full set of features. Results indicated that the algorithm achieved an accuracy of 87% for identifying snore events from the audio recordings and an accuracy of 72% for identifying OSA related snore events from the snore events. The direct method to extract OSA-related snore events using a multi-class LDA classifier achieved an accuracy of 64% using the feature selection algorithm. Our results give a clear indication that OSA-related snore events can be extracted from nocturnal sound recordings, and therefore could potentially be used as a new tool for identifying the site of airway collapse from the nocturnal audio recordings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.12829v2-abstract-full').style.display = 'none'; document.getElementById('2102.12829v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.08730">arXiv:1911.08730</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.08730">pdf</a>, <a href="https://arxiv.org/format/1911.08730">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Event-based Object Detection and Tracking for Space Situational Awareness </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Nicholson%2C+A+P">Andrew P Nicholson</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">Andre van Schaik</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.08730v1-abstract-short" style="display: inline;"> In this work, we present optical space imaging using an unconventional yet promising class of imaging devices known as neuromorphic event-based sensors. These devices, which are modeled on the human retina, do not operate with frames, but rather generate asynchronous streams of events in response to changes in log-illumination at each pixel. 
These devices are therefore extremely fast, do not have&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.08730v1-abstract-full').style.display = 'inline'; document.getElementById('1911.08730v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.08730v1-abstract-full" style="display: none;"> In this work, we present optical space imaging using an unconventional yet promising class of imaging devices known as neuromorphic event-based sensors. These devices, which are modeled on the human retina, do not operate with frames, but rather generate asynchronous streams of events in response to changes in log-illumination at each pixel. These devices are therefore extremely fast, do not have fixed exposure times, allow for imaging whilst the device is moving and enable low power space imaging during daytime as well as night without modification of the sensors. Recorded at multiple remote sites, we present the first event-based space imaging dataset including recordings from multiple event-based sensors from multiple providers, greatly lowering the barrier to entry for other researchers given the scarcity of such sensors and the expertise required to operate them. The dataset contains 236 separate recordings and 572 labeled resident space objects. The event-based imaging paradigm presents unique opportunities and challenges motivating the development of specialized event-based algorithms that can perform tasks such as detection and tracking in an event-based manner. Here we examine a range of such event-based algorithms for detection and tracking. The presented methods are designed specifically for space situational awareness applications and are evaluated in terms of accuracy and speed and suitability for implementation in neuromorphic hardware on remote or space-based imaging platforms. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.08730v1-abstract-full').style.display = 'none'; document.getElementById('1911.08730v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.06872">arXiv:1909.06872</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1909.06872">pdf</a>, <a href="https://arxiv.org/format/1909.06872">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Detecting Adversarial Samples Using Influence Functions and Nearest Neighbors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gilad Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Sapiro%2C+G">Guillermo Sapiro</a>, <a href="/search/cs?searchtype=author&amp;query=Giryes%2C+R">Raja Giryes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.06872v2-abstract-short" style="display: inline;"> Deep neural networks (DNNs) are notorious for their vulnerability to adversarial attacks, which are small perturbations added to their input images to mislead their prediction. Detection of adversarial examples is, therefore, a fundamental requirement for robust classification frameworks. 
In this work, we present a method for detecting such adversarial attacks, which is suitable for any pre-traine&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.06872v2-abstract-full').style.display = 'inline'; document.getElementById('1909.06872v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.06872v2-abstract-full" style="display: none;"> Deep neural networks (DNNs) are notorious for their vulnerability to adversarial attacks, which are small perturbations added to their input images to mislead their prediction. Detection of adversarial examples is, therefore, a fundamental requirement for robust classification frameworks. In this work, we present a method for detecting such adversarial attacks, which is suitable for any pre-trained neural network classifier. We use influence functions to measure the impact of every training sample on the validation set data. From the influence scores, we find the most supportive training samples for any given validation example. A k-nearest neighbor (k-NN) model fitted on the DNN&#39;s activation layers is employed to search for the ranking of these supporting training samples. We observe that these samples are highly correlated with the nearest neighbors of the normal inputs, while this correlation is much weaker for adversarial inputs. We train an adversarial detector using the k-NN ranks and distances and show that it successfully distinguishes adversarial examples, getting state-of-the-art results on six attack methods with three datasets. Code is available at https://github.com/giladcohen/NNIF_adv_defense. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.06872v2-abstract-full').style.display = 'none'; document.getElementById('1909.06872v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 March, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Paper accepted to CVPR 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.07853">arXiv:1907.07853</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1907.07853">pdf</a>, <a href="https://arxiv.org/format/1907.07853">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Event-based Feature Extraction Using Adaptive Selection Thresholds </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+Y">Ying Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Tapson%2C+J">Jonathan Tapson</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">André van Schaik</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.07853v2-abstract-short" style="display: inline;"> Unsupervised feature extraction algorithms form one of the most important building blocks in machine learning systems. These algorithms are often adapted to the event-based domain to perform online learning in neuromorphic hardware. However, not designed for the purpose, such algorithms typically require significant simplification during implementation to meet hardware constraints, creating trade&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.07853v2-abstract-full').style.display = 'inline'; document.getElementById('1907.07853v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.07853v2-abstract-full" style="display: none;"> Unsupervised feature extraction algorithms form one of the most important building blocks in machine learning systems. These algorithms are often adapted to the event-based domain to perform online learning in neuromorphic hardware. However, not designed for the purpose, such algorithms typically require significant simplification during implementation to meet hardware constraints, creating trade offs with performance. Furthermore, conventional feature extraction algorithms are not designed to generate useful intermediary signals which are valuable only in the context of neuromorphic hardware limitations. In this work a novel event-based feature extraction method is proposed that focuses on these issues. The algorithm operates via simple adaptive selection thresholds which allow a simpler implementation of network homeostasis than previous works by trading off a small amount of information loss in the form of missed events that fall outside the selection thresholds. 
The behavior of the selection thresholds and the output of the network as a whole are shown to provide uniquely useful signals indicating network weight convergence without the need to access network weights. A novel heuristic method for network size selection is proposed which makes use of noise events and their feature representations. The use of selection thresholds is shown to produce network activation patterns that predict classification accuracy allowing rapid evaluation and optimization of system parameters without the need to run back-end classifiers. The feature extraction method is tested on both the N-MNIST benchmarking dataset and a dataset of airplanes passing through the field of view. Multiple configurations with different classifiers are tested with the results quantifying the resultant performance gains at each processing stage. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.07853v2-abstract-full').style.display = 'none'; document.getElementById('1907.07853v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 Pages. 
9 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.00415">arXiv:1904.00415</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1904.00415">pdf</a>, <a href="https://arxiv.org/format/1904.00415">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Road Scene Understanding by Occupancy Grid Learning from Sparse Radar Clusters using Semantic Segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sless%2C+L">Liat Sless</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gilad Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Shlomo%2C+B+E">Bat El Shlomo</a>, <a href="/search/cs?searchtype=author&amp;query=Oron%2C+S">Shaul Oron</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.00415v2-abstract-short" style="display: inline;"> Occupancy grid mapping is an important component in road scene understanding for autonomous driving. It encapsulates information of the drivable area, road obstacles and enables safe autonomous driving. Radars are an emerging sensor in autonomous vehicle vision, becoming more widely used due to their long range sensing, low cost, and robustness to severe weather conditions. 
Despite recent advances&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.00415v2-abstract-full').style.display = 'inline'; document.getElementById('1904.00415v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.00415v2-abstract-full" style="display: none;"> Occupancy grid mapping is an important component in road scene understanding for autonomous driving. It encapsulates information of the drivable area, road obstacles and enables safe autonomous driving. Radars are an emerging sensor in autonomous vehicle vision, becoming more widely used due to their long range sensing, low cost, and robustness to severe weather conditions. Despite recent advances in deep learning technology, occupancy grid mapping from radar data is still mostly done using classical filtering approaches. In this work, we propose learning the inverse sensor model used for occupancy grid mapping from clustered radar data. This is done in a data driven approach that leverages computer vision techniques. This task is very challenging due to data sparsity and noise characteristics of the radar sensor. The problem is formulated as a semantic segmentation task and we show how it can be learned using lidar data for generating ground truth. We show both qualitatively and quantitatively that our learned occupancy net outperforms classic methods by a large margin using the recently released NuScenes real-world driving data. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.00415v2-abstract-full').style.display = 'none'; document.getElementById('1904.00415v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 September, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 March, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ICCV 2019 CVRSUAD </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.05427">arXiv:1806.05427</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1806.05427">pdf</a>, <a href="https://arxiv.org/ps/1806.05427">ps</a>, <a href="https://arxiv.org/format/1806.05427">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Maximum weight spectrum codes with reduced length </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G+D">Gerard D Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Tolhuizen%2C+L">Ludo Tolhuizen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1806.05427v1-abstract-short" style="display: inline;"> A q-ary linear code of dimension k is called a maximum weight spectrum (MWS) code if it has the maximum possible number (viz. (q^k-1)/(q-1)) of different non-zero weights. 
We construct MWS codes from quasi-minimal codes, thus obtaining MWS codes of much shorter length than hitherto known. By an averaging argument, we show the existence of MWS codes of even shorter length. </span> <span class="abstract-full has-text-grey-dark mathjax" id="1806.05427v1-abstract-full" style="display: none;"> A q-ary linear code of dimension k is called a maximum weight spectrum (MWS) code if it has the maximum possible number (viz. (q^k-1)/(q-1)) of different non-zero weights. We construct MWS codes from quasi-minimal codes, thus obtaining MWS codes of much shorter length than hitherto known. By an averaging argument, we show the existence of MWS codes of even shorter length. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1806.05427v1-abstract-full').style.display = 'none'; document.getElementById('1806.05427v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The second author dedicates this manuscript to the memory of Gerard Cohen, who sadly passed away during its preparation</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.06822">arXiv:1805.06822</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1805.06822">pdf</a>, <a href="https://arxiv.org/format/1805.06822">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> DNN or k-NN: That is the Generalize vs. Memorize Question </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gilad Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Sapiro%2C+G">Guillermo Sapiro</a>, <a href="/search/cs?searchtype=author&amp;query=Giryes%2C+R">Raja Giryes</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1805.06822v6-abstract-short" style="display: inline;"> This paper studies the relationship between the classification performed by deep neural networks (DNNs) and the decision of various classical classifiers, namely k-nearest neighbours (k-NN), support vector machines (SVM) and logistic regression (LR), at various layers of the network. 
This comparison provides us with new insights as to the ability of neural networks to both memorize the training da&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.06822v6-abstract-full').style.display = 'inline'; document.getElementById('1805.06822v6-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.06822v6-abstract-full" style="display: none;"> This paper studies the relationship between the classification performed by deep neural networks (DNNs) and the decision of various classical classifiers, namely k-nearest neighbours (k-NN), support vector machines (SVM) and logistic regression (LR), at various layers of the network. This comparison provides us with new insights as to the ability of neural networks to both memorize the training data and generalize to new data at the same time, where k-NN serves as the ideal estimator that perfectly memorizes the data. We show that memorization of non-generalizing networks happens only at the last layers. Moreover, the behavior of DNNs compared to the linear classifiers SVM and LR is quite the same on the training and test data regardless of whether the network generalizes. On the other hand, the similarity to k-NN holds only at the absence of overfitting. Our results suggest that k-NN behavior of the network on new data is a sign of generalization. Moreover, it shows that memorization and generalization, which are traditionally considered to be contradicting to each other, are compatible and complementary. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.06822v6-abstract-full').style.display = 'none'; document.getElementById('1805.06822v6-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 February, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Poster presented in NIPS 2018 &#34;Integration of Deep Learning Theories&#34; workshop</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1802.03796">arXiv:1802.03796</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1802.03796">pdf</a>, <a href="https://arxiv.org/format/1802.03796">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Curriculum Learning by Transfer Learning: Theory and Experiments with Deep Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Weinshall%2C+D">Daphna Weinshall</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gad Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Amir%2C+D">Dan Amir</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1802.03796v4-abstract-short" style="display: inline;"> We provide theoretical investigation of curriculum learning in the context of stochastic gradient 
descent when optimizing the convex linear regression loss. We prove that the rate of convergence of an ideal curriculum learning method is monotonically increasing with the difficulty of the examples. Moreover, among all equally difficult points, convergence is faster when using points which incur hig&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.03796v4-abstract-full').style.display = 'inline'; document.getElementById('1802.03796v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1802.03796v4-abstract-full" style="display: none;"> We provide theoretical investigation of curriculum learning in the context of stochastic gradient descent when optimizing the convex linear regression loss. We prove that the rate of convergence of an ideal curriculum learning method is monotonically increasing with the difficulty of the examples. Moreover, among all equally difficult points, convergence is faster when using points which incur higher loss with respect to the current hypothesis. We then analyze curriculum learning in the context of training a CNN. We describe a method which infers the curriculum by way of transfer learning from another network, pre-trained on a different task. While this approach can only approximate the ideal curriculum, we observe empirically similar behavior to the one predicted by the theory, namely, a significant boost in convergence speed at the beginning of training. When the task is made more difficult, improvement in generalization performance is also observed. Finally, curriculum learning exhibits robustness against unfavorable conditions such as excessive regularization. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.03796v4-abstract-full').style.display = 'none'; document.getElementById('1802.03796v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 February, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICML 2018</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings: 35th International Conference on Machine Learning (ICML), oral, Stockholm Sweden, July 2018 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1802.00148">arXiv:1802.00148</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1802.00148">pdf</a>, <a href="https://arxiv.org/format/1802.00148">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/s10623-018-0488-z">10.1007/s10623-018-0488-z <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> How many weights can a linear code have ? 
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shi%2C+M">Minjia Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+H">Hongwei Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Sol%C3%A9%2C+P">Patrick Solé</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G+D">Gérard D. Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1802.00148v2-abstract-short" style="display: inline;"> We study the combinatorial function $L(k,q),$ the maximum number of nonzero weights a linear code of dimension $k$ over $\F_q$ can have. We determine it completely for $q=2,$ and for $k=2,$ and provide upper and lower bounds in the general case when both $k$ and $q$ are $\ge 3.$ A refinement $L(n,k,q),$ as well as nonlinear analogues $N(M,q)$ and $N(n,M,q),$ are also introduced and studied. </span> <span class="abstract-full has-text-grey-dark mathjax" id="1802.00148v2-abstract-full" style="display: none;"> We study the combinatorial function $L(k,q),$ the maximum number of nonzero weights a linear code of dimension $k$ over $\F_q$ can have. We determine it completely for $q=2,$ and for $k=2,$ and provide upper and lower bounds in the general case when both $k$ and $q$ are $\ge 3.$ A refinement $L(n,k,q),$ as well as nonlinear analogues $N(M,q)$ and $N(n,M,q),$ are also introduced and studied. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.00148v2-abstract-full').style.display = 'none'; document.getElementById('1802.00148v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 April, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 January, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Designs, Codes and Cryptography, 2018 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1710.01783">arXiv:1710.01783</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1710.01783">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Some facts on Permanents in Finite Characteristics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Knezevic%2C+A">Anna Knezevic</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Greg Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Domanskaya%2C+M">Marina Domanskaya</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1710.01783v9-abstract-short" style="display: inline;"> The polynomial-time computability of the permanent over fields of characteristic 3 for k-semi-unitary matrices (i.e. 
square matrices such that the differences of their Gram matrices and the corresponding identity matrices are of rank k) in the case k = 0 or k = 1 and its #3P-completeness for any k &gt; 1 (Ref. 9) is a result that essentially widens our understanding of the computational complexity bo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.01783v9-abstract-full').style.display = 'inline'; document.getElementById('1710.01783v9-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1710.01783v9-abstract-full" style="display: none;"> The polynomial-time computability of the permanent over fields of characteristic 3 for k-semi-unitary matrices (i.e. square matrices such that the differences of their Gram matrices and the corresponding identity matrices are of rank k) in the case k = 0 or k = 1 and its #3P-completeness for any k &gt; 1 (Ref. 9) is a result that essentially widens our understanding of the computational complexity boundaries for the permanent modulo 3. Now we extend this result to study more closely the case k &gt; 1 regarding the (n-k)x(n-k)-sub-permanents (or permanent-minors) of a unitary nxn-matrix and their possible relations, because an (n-k)x(n-k)-submatrix of a unitary nxn-matrix is generically a k-semi-unitary (n-k)x(n-k)-matrix. The following paper offers a way to receive a variety of such equations of different sorts, in the meantime extending this direction of research to reviewing all the set of polynomial-time permanent-preserving reductions and equations for the sub-permanents of a generic matrix they might yield, including a number of generalizations and formulae (valid in an arbitrary prime characteristic) analogical to the classical identities relating the minors of a matrix and its inverse. 
Moreover, the second chapter also deals with the Hamiltonian cycle polynomial in characteristic 2 that surprisingly possesses quite a number of properties very similar to the corresponding ones of the permanent in characteristic 3, while over the field GF(2) it obtains even more amazing features. Besides, the third chapter is devoted to the computational complexity issues of the permanent and some related functions on a variety of Cauchy matrices and their certain generalizations, including constructing a polynomial-time algorithm (based on them) for the permanent of an arbitrary matrix in characteristic 5 (implying RP = NP) and conjecturing the existence of a similar scheme in characteristic 3. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1710.01783v9-abstract-full').style.display = 'none'; document.getElementById('1710.01783v9-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 October, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2017. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">89 pages; this research was partly supported by the School of Electrical Engineering, Computing and Mathematical Sciences of the Curtin University (Australia) whose member is one of the authors (Anna Knezevic)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1702.05373">arXiv:1702.05373</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1702.05373">pdf</a>, <a href="https://arxiv.org/format/1702.05373">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> EMNIST: an extension of MNIST to handwritten letters </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Tapson%2C+J">Jonathan Tapson</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">André van Schaik</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1702.05373v2-abstract-short" style="display: inline;"> The MNIST dataset has become a standard benchmark for learning, classification and computer vision systems. Contributing to its widespread adoption are the understandable and intuitive nature of the task, its relatively small size and storage requirements and the accessibility and ease-of-use of the database itself. 
The MNIST database was derived from a larger dataset known as the NIST Special Dat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.05373v2-abstract-full').style.display = 'inline'; document.getElementById('1702.05373v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1702.05373v2-abstract-full" style="display: none;"> The MNIST dataset has become a standard benchmark for learning, classification and computer vision systems. Contributing to its widespread adoption are the understandable and intuitive nature of the task, its relatively small size and storage requirements and the accessibility and ease-of-use of the database itself. The MNIST database was derived from a larger dataset known as the NIST Special Database 19 which contains digits, uppercase and lowercase handwritten letters. This paper introduces a variant of the full NIST dataset, which we have called Extended MNIST (EMNIST), which follows the same conversion paradigm used to create the MNIST dataset. The result is a set of datasets that constitute a more challenging classification tasks involving letters and digits, and that shares the same image structure and parameters as the original MNIST task, allowing for direct compatibility with all existing classifiers and systems. Benchmark results are presented along with a validation of the conversion process through the comparison of the classification results on converted NIST digits and the MNIST digits. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1702.05373v2-abstract-full').style.display = 'none'; document.getElementById('1702.05373v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 March, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 February, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2017. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The dataset is now available for download from https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist. This link is also included in the revised article</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1608.06318">arXiv:1608.06318</a> <span>&nbsp;&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Privacy Amplification Against Active Quantum Adversaries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gil Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Vidick%2C+T">Thomas Vidick</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1608.06318v2-abstract-short" style="display: inline;"> 
Privacy amplification is the task by which two cooperating parties transform a shared weak secret, about which an eavesdropper may have side information, into a uniformly random string uncorrelated from the eavesdropper. Privacy amplification against passive adversaries, where it is assumed that the communication is over a public but authenticated channel, can be achieved in the presence of classi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1608.06318v2-abstract-full').style.display = 'inline'; document.getElementById('1608.06318v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1608.06318v2-abstract-full" style="display: none;"> Privacy amplification is the task by which two cooperating parties transform a shared weak secret, about which an eavesdropper may have side information, into a uniformly random string uncorrelated from the eavesdropper. Privacy amplification against passive adversaries, where it is assumed that the communication is over a public but authenticated channel, can be achieved in the presence of classical as well as quantum side information by a single-message protocol based on strong extractors. In 2009 Dodis and Wichs devised a two-message protocol to achieve privacy amplification against active adversaries, where the public communication channel is no longer assumed to be authenticated, through the use of a strengthening of strong extractors called non-malleable extractors which they introduced. Dodis and Wichs only analyzed the case of classical side information. We consider the task of privacy amplification against active adversaries with quantum side information. Our main result is showing that the Dodis-Wichs protocol remains secure in this scenario provided its main building block, the non-malleable extractor, satisfies a notion of quantum-proof non-malleability which we introduce. 
We show that an adaptation of a recent construction of non-malleable extractors due to Chattopadhyay et al. is quantum proof, thereby providing the first protocol for privacy amplification that is secure against active quantum adversaries. Our protocol is quantitatively comparable to the near-optimal protocols known in the classical setting. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1608.06318v2-abstract-full').style.display = 'none'; document.getElementById('1608.06318v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 September, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 August, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The result is invalidated due to a mistake, pointed out by an anonymous referee, in the use of the Markov condition at the beginning of the proof of Theorem 31</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1605.04194">arXiv:1605.04194</a> <span>&nbsp;&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Quantum-Proof Extractors: Optimal up to Constant Factors </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chung%2C+K">Kai-Min Chung</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gil Cohen</a>, <a 
href="/search/cs?searchtype=author&amp;query=Vidick%2C+T">Thomas Vidick</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+X">Xiaodi Wu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1605.04194v2-abstract-short" style="display: inline;"> We give the first construction of a family of quantum-proof extractors that has optimal seed length dependence $O(\log(n/\varepsilon))$ on the input length $n$ and error $\varepsilon$. Our extractors support any min-entropy $k=Ω(\log{n} + \log^{1+α}(1/\varepsilon))$ and extract $m=(1-α)k$ bits that are $\varepsilon$-close to uniform, for any desired constant $α&gt; 0$. Previous constructions had a qu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1605.04194v2-abstract-full').style.display = 'inline'; document.getElementById('1605.04194v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1605.04194v2-abstract-full" style="display: none;"> We give the first construction of a family of quantum-proof extractors that has optimal seed length dependence $O(\log(n/\varepsilon))$ on the input length $n$ and error $\varepsilon$. Our extractors support any min-entropy $k=Ω(\log{n} + \log^{1+α}(1/\varepsilon))$ and extract $m=(1-α)k$ bits that are $\varepsilon$-close to uniform, for any desired constant $α&gt; 0$. Previous constructions had a quadratically worse seed length or were restricted to very large input min-entropy or very few output bits. Our result is based on a generic reduction showing that any strong classical condenser is automatically quantum-proof, with comparable parameters. The existence of such a reduction for extractors is a long-standing open question, here we give an affirmative answer for condensers. 
Once this reduction is established, to obtain our quantum-proof extractors one only needs to consider high entropy sources. We construct quantum-proof extractors with the desired parameters for such sources by extending a classical approach to extractor construction, based on the use of block-sources and sampling, to the quantum setting. Our extractors can be used to obtain improved protocols for device-independent randomness expansion and for privacy amplification. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1605.04194v2-abstract-full').style.display = 'none'; document.getElementById('1605.04194v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2016; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 May, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2016. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The paper has been withdrawn due to an error in the proof of Lemma 3.4 (step going from second-last to last centered equations), which invalidates the main result</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1603.04223">arXiv:1603.04223</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1603.04223">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Investigation of event-based memory surfaces for high-speed tracking, unsupervised feature extraction and object recognition </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Hamilton%2C+T+J">Tara Julia Hamilton</a>, <a href="/search/cs?searchtype=author&amp;query=Tapson%2C+J">Jonathan Tapson</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">Andre van Schaik</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1603.04223v3-abstract-short" style="display: inline;"> In this paper we compare event-based decaying and time based-decaying memory surfaces for high-speed eventbased tracking, feature extraction, and object classification using an event-based camera. 
The high-speed recognition task involves detecting and classifying model airplanes that are dropped free-hand close to the camera lens so as to generate a challenging dataset exhibiting significant varia&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1603.04223v3-abstract-full').style.display = 'inline'; document.getElementById('1603.04223v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1603.04223v3-abstract-full" style="display: none;"> In this paper we compare event-based decaying and time based-decaying memory surfaces for high-speed eventbased tracking, feature extraction, and object classification using an event-based camera. The high-speed recognition task involves detecting and classifying model airplanes that are dropped free-hand close to the camera lens so as to generate a challenging dataset exhibiting significant variance in target velocity. This variance motivated the investigation of event-based decaying memory surfaces in comparison to time-based decaying memory surfaces to capture the temporal aspect of the event-based data. These surfaces are then used to perform unsupervised feature extraction, tracking and recognition. In order to generate the memory surfaces, event binning, linearly decaying kernels, and exponentially decaying kernels were investigated with exponentially decaying kernels found to perform best. Event-based decaying memory surfaces were found to outperform time-based decaying memory surfaces in recognition especially when invariance to target velocity was made a requirement. A range of network and receptive field sizes were investigated. The system achieves 98.75% recognition accuracy within 156 milliseconds of an airplane entering the field of view, using only twenty-five event-based feature extracting neurons in series with a linear classifier. 
By comparing the linear classifier results to an ELM classifier, we find that a small number of event-based feature extractors can effectively project the complex spatio-temporal event patterns of the dataset to an almost linearly separable representation in feature space. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1603.04223v3-abstract-full').style.display = 'none'; document.getElementById('1603.04223v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 March, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This is an updated version of a previously submitted manuscript</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1507.07629">arXiv:1507.07629</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1507.07629">pdf</a>, <a href="https://arxiv.org/format/1507.07629">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Databases">cs.DB</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orchard%2C+G">Garrick Orchard</a>, <a href="/search/cs?searchtype=author&amp;query=Jayawant%2C+A">Ajinkya Jayawant</a>, <a 
href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Thakor%2C+N">Nitish Thakor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1507.07629v1-abstract-short" style="display: inline;"> Creating datasets for Neuromorphic Vision is a challenging task. A lack of available recordings from Neuromorphic Vision sensors means that data must typically be recorded specifically for dataset creation rather than collecting and labelling existing data. The task is further complicated by a desire to simultaneously provide traditional frame-based recordings to allow for direct comparison with t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1507.07629v1-abstract-full').style.display = 'inline'; document.getElementById('1507.07629v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1507.07629v1-abstract-full" style="display: none;"> Creating datasets for Neuromorphic Vision is a challenging task. A lack of available recordings from Neuromorphic Vision sensors means that data must typically be recorded specifically for dataset creation rather than collecting and labelling existing data. The task is further complicated by a desire to simultaneously provide traditional frame-based recordings to allow for direct comparison with traditional Computer Vision algorithms. Here we propose a method for converting existing Computer Vision static image datasets into Neuromorphic Vision datasets using an actuated pan-tilt camera platform. Moving the sensor rather than the scene or image is a more biologically realistic approach to sensing and eliminates timing artifacts introduced by monitor updates when simulating motion on a computer monitor. 
We present conversion of two popular image datasets (MNIST and Caltech101) which have played important roles in the development of Computer Vision, and we provide performance metrics on these datasets using spike-based recognition algorithms. This work contributes datasets for future use in the field, as well as results from spike-based algorithms against which future works can compare. Furthermore, by converting datasets already popular in Computer Vision, we enable more direct comparison with frame-based approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1507.07629v1-abstract-full').style.display = 'none'; document.getElementById('1507.07629v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 July, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2015. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 6 figures in Frontiers in Neuromorphic Engineering, special topic on Benchmarks and Challenges for Neuromorphic Engineering, 2015 (under review)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1506.04428">arXiv:1506.04428</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1506.04428">pdf</a>, <a href="https://arxiv.org/format/1506.04428">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> </div> </div> <p class="title is-5 
mathjax"> Two-Source Dispersers for Polylogarithmic Entropy and Improved Ramsey Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gil Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1506.04428v1-abstract-short" style="display: inline;"> In his 1947 paper that inaugurated the probabilistic method, Erdős proved the existence of $2\log{n}$-Ramsey graphs on $n$ vertices. Matching Erdős&#39; result with a constructive proof is a central problem in combinatorics, that has gained a significant attention in the literature. The state of the art result was obtained in the celebrated paper by Barak, Rao, Shaltiel and Wigderson [Ann. Math&#39;12], w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1506.04428v1-abstract-full').style.display = 'inline'; document.getElementById('1506.04428v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1506.04428v1-abstract-full" style="display: none;"> In his 1947 paper that inaugurated the probabilistic method, Erdős proved the existence of $2\log{n}$-Ramsey graphs on $n$ vertices. Matching Erdős&#39; result with a constructive proof is a central problem in combinatorics, that has gained a significant attention in the literature. The state of the art result was obtained in the celebrated paper by Barak, Rao, Shaltiel and Wigderson [Ann. Math&#39;12], who constructed a $2^{2^{(\log\log{n})^{1-α}}}$-Ramsey graph, for some small universal constant $α&gt; 0$. In this work, we significantly improve the result of Barak~\etal and construct $2^{(\log\log{n})^c}$-Ramsey graphs, for some universal constant $c$. 
In the language of theoretical computer science, our work resolves the problem of explicitly constructing two-source dispersers for polylogarithmic entropy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1506.04428v1-abstract-full').style.display = 'none'; document.getElementById('1506.04428v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2015. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1505.02495">arXiv:1505.02495</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1505.02495">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> An Online Learning Algorithm for Neuromorphic Hardware Implementation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Thakur%2C+C+S">Chetan Singh Thakur</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Runchun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Hamilton%2C+T+J">Tara Julia Hamilton</a>, <a href="/search/cs?searchtype=author&amp;query=Tapson%2C+J">Jonathan Tapson</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">Andre van Schaik</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1505.02495v2-abstract-short" style="display: 
inline;"> We propose a sign-based online learning (SOL) algorithm for a neuromorphic hardware framework called Trainable Analogue Block (TAB). The TAB framework utilises the principles of neural population coding, implying that it encodes the input stimulus using a large pool of nonlinear neurons. The SOL algorithm is a simple weight update rule that employs the sign of the hidden layer activation and the s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1505.02495v2-abstract-full').style.display = 'inline'; document.getElementById('1505.02495v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1505.02495v2-abstract-full" style="display: none;"> We propose a sign-based online learning (SOL) algorithm for a neuromorphic hardware framework called Trainable Analogue Block (TAB). The TAB framework utilises the principles of neural population coding, implying that it encodes the input stimulus using a large pool of nonlinear neurons. The SOL algorithm is a simple weight update rule that employs the sign of the hidden layer activation and the sign of the output error, which is the difference between the target output and the predicted output. The SOL algorithm is easily implementable in hardware, and can be used in any artificial neural network framework that learns weights by minimising a convex cost function. We show that the TAB framework can be trained for various regression tasks using the SOL algorithm. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1505.02495v2-abstract-full').style.display = 'none'; document.getElementById('1505.02495v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 July, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 May, 2015; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2015. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1404.0654">arXiv:1404.0654</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1404.0654">pdf</a>, <a href="https://arxiv.org/ps/1404.0654">ps</a>, <a href="https://arxiv.org/format/1404.0654">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Two Structural Results for Low Degree Polynomials and Applications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gil Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Tal%2C+A">Avishay Tal</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1404.0654v1-abstract-short" style="display: inline;"> In this paper, two structural results concerning low degree polynomials over finite fields are given. 
The first states that over any finite field $\mathbb{F}$, for any polynomial $f$ on $n$ variables with degree $d \le \log(n)/10$, there exists a subspace of $\mathbb{F}^n$ with dimension $Ω(d \cdot n^{1/(d-1)})$ on which $f$ is constant. This result is shown to be tight. Stated differently, a degr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1404.0654v1-abstract-full').style.display = 'inline'; document.getElementById('1404.0654v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1404.0654v1-abstract-full" style="display: none;"> In this paper, two structural results concerning low degree polynomials over finite fields are given. The first states that over any finite field $\mathbb{F}$, for any polynomial $f$ on $n$ variables with degree $d \le \log(n)/10$, there exists a subspace of $\mathbb{F}^n$ with dimension $Ω(d \cdot n^{1/(d-1)})$ on which $f$ is constant. This result is shown to be tight. Stated differently, a degree $d$ polynomial cannot compute an affine disperser for dimension smaller than $Ω(d \cdot n^{1/(d-1)})$. Using a recursive argument, we obtain our second structural result, showing that any degree $d$ polynomial $f$ induces a partition of $F^n$ to affine subspaces of dimension $Ω(n^{1/(d-1)!})$, such that $f$ is constant on each part. We extend both structural results to more than one polynomial. We further prove an analog of the first structural result to sparse polynomials (with no restriction on the degree) and to functions that are close to low degree polynomials. We also consider the algorithmic aspect of the two structural results. Our structural results have various applications, two of which are: * Dvir [CC 2012] introduced the notion of extractors for varieties, and gave explicit constructions of such extractors over large fields. 
We show that over any finite field, any affine extractor is also an extractor for varieties with related parameters. Our reduction also holds for dispersers, and we conclude that Shaltiel&#39;s affine disperser [FOCS 2011] is a disperser for varieties over $F_2$. * Ben-Sasson and Kopparty [SIAM J. C 2012] proved that any degree 3 affine disperser over a prime field is also an affine extractor with related parameters. Using our structural results, and based on the work of Kaufman and Lovett [FOCS 2008] and Haramaty and Shpilka [STOC 2010], we generalize this result to any constant degree. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1404.0654v1-abstract-full').style.display = 'none'; document.getElementById('1404.0654v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 April, 2014; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2014. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1402.0349">arXiv:1402.0349</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1402.0349">pdf</a>, <a href="https://arxiv.org/ps/1402.0349">ps</a>, <a href="https://arxiv.org/format/1402.0349">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Zero-error capacity of binary channels with memory </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gérard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Fachini%2C+E">Emanuela Fachini</a>, <a href="/search/cs?searchtype=author&amp;query=K%C3%B6rner%2C+J">János Körner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1402.0349v2-abstract-short" style="display: inline;"> We begin a systematic study of the problem of the zero--error capacity of noisy binary channels with memory and solve some of the non--trivial cases. </span> <span class="abstract-full has-text-grey-dark mathjax" id="1402.0349v2-abstract-full" style="display: none;"> We begin a systematic study of the problem of the zero--error capacity of noisy binary channels with memory and solve some of the non--trivial cases. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1402.0349v2-abstract-full').style.display = 'none'; document.getElementById('1402.0349v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 April, 2015; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 February, 2014; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2014. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages. This paper is the revised version of our previous paper having the same title, published on ArXiV on February 3, 2014. We complete Theorem 2 of the previous version by showing here that our previous construction is asymptotically optimal. This proves that the isometric triangles yield different capacities. 
The new manuscript differs from the old one by the addition of one more page</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 05D05; 94A24 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1310.2017">arXiv:1310.2017</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1310.2017">pdf</a>, <a href="https://arxiv.org/ps/1310.2017">ps</a>, <a href="https://arxiv.org/format/1310.2017">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Metric Geometry">math.MG</span> </div> </div> <p class="title is-5 mathjax"> Bi-Lipschitz Bijection between the Boolean Cube and the Hamming Ball </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Benjamini%2C+I">Itai Benjamini</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gil Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Shinkar%2C+I">Igor Shinkar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1310.2017v1-abstract-short" style="display: inline;"> We construct a bi-Lipschitz bijection from the Boolean cube to the Hamming ball of equal volume. 
More precisely, we show that for all even n there exists an explicit bijection f from the n-dimensional Boolean cube to the Hamming ball of equal volume embedded in (n+1)-dimensional Boolean cube, such that for all x and y it holds that distance(x,y) / 5 &lt;= distance(f(x),f(y)) &lt;= 4 distance(x,y) where&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1310.2017v1-abstract-full').style.display = 'inline'; document.getElementById('1310.2017v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1310.2017v1-abstract-full" style="display: none;"> We construct a bi-Lipschitz bijection from the Boolean cube to the Hamming ball of equal volume. More precisely, we show that for all even n there exists an explicit bijection f from the n-dimensional Boolean cube to the Hamming ball of equal volume embedded in (n+1)-dimensional Boolean cube, such that for all x and y it holds that distance(x,y) / 5 &lt;= distance(f(x),f(y)) &lt;= 4 distance(x,y) where distance(,) denotes the Hamming distance. In particular, this implies that the Hamming ball is bi-Lipschitz transitive. This result gives a strong negative answer to an open problem of Lovett and Viola [CC 2012], who raised the question in the context of sampling distributions in low-level complexity classes. The conceptual implication is that the problem of proving lower bounds in the context of sampling distributions will require some new ideas beyond the sensitivity-based structural results of Boppana [IPL 97]. We study the mapping f further and show that it (and its inverse) are computable in DLOGTIME-uniform TC0, but not in AC0. Moreover, we prove that f is &#34;approximately local&#34; in the sense that all but the last output bit of f are essentially determined by a single input bit. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1310.2017v1-abstract-full').style.display = 'none'; document.getElementById('1310.2017v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 October, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2013. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1306.6265">arXiv:1306.6265</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1306.6265">pdf</a>, <a href="https://arxiv.org/ps/1306.6265">ps</a>, <a href="https://arxiv.org/format/1306.6265">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Towards Secure Two-Party Computation from the Wire-Tap Channel </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chabanne%2C+H">Hervé Chabanne</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gérard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Patey%2C+A">Alain Patey</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1306.6265v1-abstract-short" style="display: inline;"> We introduce a new protocol for secure two-party computation of linear functions in the semi-honest model, based on coding techniques. We first establish a parallel between the second version of the wire-tap channel model and secure two-party computation. 
This leads us to our protocol, that combines linear coset coding and oblivious transfer techniques. Our construction requires the use of binary&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1306.6265v1-abstract-full').style.display = 'inline'; document.getElementById('1306.6265v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1306.6265v1-abstract-full" style="display: none;"> We introduce a new protocol for secure two-party computation of linear functions in the semi-honest model, based on coding techniques. We first establish a parallel between the second version of the wire-tap channel model and secure two-party computation. This leads us to our protocol, that combines linear coset coding and oblivious transfer techniques. Our construction requires the use of binary intersecting codes or $q$-ary minimal codes, which are also studied in this paper. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1306.6265v1-abstract-full').style.display = 'none'; document.getElementById('1306.6265v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 June, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2013. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1306.3036">arXiv:1306.3036</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1306.3036">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> The Ripple Pond: Enabling Spiking Networks to See </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">Saeed Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gregory Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">Runchun Wang</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">Andre van Schaik</a>, <a href="/search/cs?searchtype=author&amp;query=Tapson%2C+J">Jonathan Tapson</a>, <a href="/search/cs?searchtype=author&amp;query=Lehmann%2C+T">Torsten Lehmann</a>, <a href="/search/cs?searchtype=author&amp;query=Hamilton%2C+T+J">Tara Julia Hamilton</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1306.3036v1-abstract-short" style="display: inline;"> In this paper we present the biologically inspired Ripple Pond Network (RPN), a simply connected spiking neural network that, operating together with recently proposed PolyChronous Networks (PCN), enables rapid, unsupervised, scale and rotation invariant object recognition using efficient spatio-temporal spike coding. 
The RPN has been developed as a hardware solution linking previously implemented&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1306.3036v1-abstract-full').style.display = 'inline'; document.getElementById('1306.3036v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1306.3036v1-abstract-full" style="display: none;"> In this paper we present the biologically inspired Ripple Pond Network (RPN), a simply connected spiking neural network that, operating together with recently proposed PolyChronous Networks (PCN), enables rapid, unsupervised, scale and rotation invariant object recognition using efficient spatio-temporal spike coding. The RPN has been developed as a hardware solution linking previously implemented neuromorphic vision and memory structures capable of delivering end-to-end high-speed, low-power and low-resolution recognition for mobile and autonomous applications where slow, highly sophisticated and power hungry signal processing solutions are ineffective. Key aspects in the proposed approach include utilising the spatial properties of physically embedded neural networks and propagating waves of activity therein for information processing, using dimensional collapse of imagery information into amenable temporal patterns and the use of asynchronous frames for information binding. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1306.3036v1-abstract-full').style.display = 'none'; document.getElementById('1306.3036v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 June, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2013. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to Frontiers in Neuromorphic Engineering (June 12, 2013)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1304.7118">arXiv:1304.7118</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1304.7118">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neurons and Cognition">q-bio.NC</span> </div> </div> <p class="title is-5 mathjax"> Synthesis of neural networks for spatio-temporal spike pattern recognition and processing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tapson%2C+J">J. Tapson</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">G. Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Afshar%2C+S">S. Afshar</a>, <a href="/search/cs?searchtype=author&amp;query=Stiefel%2C+K">K. Stiefel</a>, <a href="/search/cs?searchtype=author&amp;query=Buskila%2C+Y">Y. Buskila</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+R">R. Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Hamilton%2C+T+J">T. J. Hamilton</a>, <a href="/search/cs?searchtype=author&amp;query=van+Schaik%2C+A">A. van Schaik</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1304.7118v1-abstract-short" style="display: inline;"> The advent of large scale neural computational platforms has highlighted the lack of algorithms for synthesis of neural structures to perform predefined cognitive tasks. 
The Neural Engineering Framework offers one such synthesis, but it is most effective for a spike rate representation of neural information, and it requires a large number of neurons to implement simple functions. We describe a neu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1304.7118v1-abstract-full').style.display = 'inline'; document.getElementById('1304.7118v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1304.7118v1-abstract-full" style="display: none;"> The advent of large scale neural computational platforms has highlighted the lack of algorithms for synthesis of neural structures to perform predefined cognitive tasks. The Neural Engineering Framework offers one such synthesis, but it is most effective for a spike rate representation of neural information, and it requires a large number of neurons to implement simple functions. We describe a neural network synthesis method that generates synaptic connectivity for neurons which process time-encoded neural signals, and which makes very sparse use of neurons. The method allows the user to specify, arbitrarily, neuronal characteristics such as axonal and dendritic delays, and synaptic transfer functions, and then solves for the optimal input-output relationship using computed dendritic weights. The method may be used for batch or online learning and has an extremely fast optimization process. We demonstrate its use in generating a network to recognize speech which is sparsely encoded as spike times. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1304.7118v1-abstract-full').style.display = 'none'; document.getElementById('1304.7118v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 April, 2013; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2013. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">In submission to Frontiers in Neuromorphic Engineering</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1209.0296">arXiv:1209.0296</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1209.0296">pdf</a>, <a href="https://arxiv.org/format/1209.0296">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Physics">physics.comp-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Other Computer Science">cs.OH</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1021/ct100385b">10.1021/ct100385b <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Simulating Lattice Spin Models on Graphics Processing Units </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Levy%2C+T">Tal Levy</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Guy Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Rabani%2C+E">Eran Rabani</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1209.0296v1-abstract-short" style="display: inline;"> Lattice spin models are useful for studying critical phenomena and allow the extraction of equilibrium and dynamical properties. Simulations of such systems are usually based on Monte Carlo (MC) techniques, and the main difficulty is often the large computational effort needed when approaching critical points. In this work, it is shown how such simulations can be accelerated with the use of NVIDIA&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1209.0296v1-abstract-full').style.display = 'inline'; document.getElementById('1209.0296v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1209.0296v1-abstract-full" style="display: none;"> Lattice spin models are useful for studying critical phenomena and allow the extraction of equilibrium and dynamical properties. Simulations of such systems are usually based on Monte Carlo (MC) techniques, and the main difficulty is often the large computational effort needed when approaching critical points. In this work, it is shown how such simulations can be accelerated with the use of NVIDIA graphics processing units (GPUs) using the CUDA programming architecture. We have developed two different algorithms for lattice spin models, the first useful for equilibrium properties near a second-order phase transition point and the second for dynamical slowing down near a glass transition. The algorithms are based on parallel MC techniques, and speedups from 70- to 150-fold over conventional single-threaded computer codes are obtained using consumer-grade hardware. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1209.0296v1-abstract-full').style.display = 'none'; document.getElementById('1209.0296v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 September, 2012; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2012. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Journal of Chemical Theory and Computation 6, 11, 3293-3301, 2010 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1105.3879">arXiv:1105.3879</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1105.3879">pdf</a>, <a href="https://arxiv.org/ps/1105.3879">ps</a>, <a href="https://arxiv.org/format/1105.3879">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Non-Malleable Codes from the Wire-Tap Channel </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chabanne%2C+H">Hervé Chabanne</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gérard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Flori%2C+J">Jean-Pierre Flori</a>, <a href="/search/cs?searchtype=author&amp;query=Patey%2C+A">Alain Patey</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1105.3879v1-abstract-short" style="display: inline;"> Recently, Dziembowski et al. 
introduced the notion of non-malleable codes (NMC), inspired from the notion of non-malleability in cryptography and the work of Gennaro et al. in 2004 on tamper proof security. Informally, when using NMC, if an attacker modifies a codeword, decoding this modified codeword will return either the original message or a completely unrelated value. The definition of NMC&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1105.3879v1-abstract-full').style.display = 'inline'; document.getElementById('1105.3879v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1105.3879v1-abstract-full" style="display: none;"> Recently, Dziembowski et al. introduced the notion of non-malleable codes (NMC), inspired from the notion of non-malleability in cryptography and the work of Gennaro et al. in 2004 on tamper proof security. Informally, when using NMC, if an attacker modifies a codeword, decoding this modified codeword will return either the original message or a completely unrelated value. The definition of NMC is related to a family of modifications authorized to the attacker. In their paper, Dziembowski et al. propose a construction valid for the family of all bit-wise independent functions. In this article, we study the link between the second version of the Wire-Tap (WT) Channel, introduced by Ozarow and Wyner in 1984, and NMC. Using coset-coding, we describe a new construction for NMC w.r.t. a subset of the family of bit-wise independent functions. Our scheme is easier to build and more efficient than the one proposed by Dziembowski et al. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1105.3879v1-abstract-full').style.display = 'none'; document.getElementById('1105.3879v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 May, 2011; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2011. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1009.3657">arXiv:1009.3657</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1009.3657">pdf</a>, <a href="https://arxiv.org/ps/1009.3657">ps</a>, <a href="https://arxiv.org/format/1009.3657">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> On Bounded Weight Codes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bachoc%2C+C">Christine Bachoc</a>, <a href="/search/cs?searchtype=author&amp;query=Chandar%2C+V">Venkat Chandar</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gerard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Sole%2C+P">Patrick Sole</a>, <a href="/search/cs?searchtype=author&amp;query=Tchamkerten%2C+A">Aslan Tchamkerten</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1009.3657v1-abstract-short" style="display: inline;"> The maximum size of a binary code is studied as a function of its length N, minimum distance D, and minimum codeword 
weight W. This function B(N,D,W) is first characterized in terms of its exponential growth rate in the limit as N tends to infinity for fixed d=D/N and w=W/N. The exponential growth rate of B(N,D,W) is shown to be equal to the exponential growth rate of A(N,D) for w &lt;= 1/2, and equa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1009.3657v1-abstract-full').style.display = 'inline'; document.getElementById('1009.3657v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1009.3657v1-abstract-full" style="display: none;"> The maximum size of a binary code is studied as a function of its length N, minimum distance D, and minimum codeword weight W. This function B(N,D,W) is first characterized in terms of its exponential growth rate in the limit as N tends to infinity for fixed d=D/N and w=W/N. The exponential growth rate of B(N,D,W) is shown to be equal to the exponential growth rate of A(N,D) for w &lt;= 1/2, and equal to the exponential growth rate of A(N,D,W) for 1/2&lt; w &lt;= 1. Second, analytic and numerical upper bounds on B(N,D,W) are derived using the semidefinite programming (SDP) method. These bounds yield a non-asymptotic improvement of the second Johnson bound and are tight for certain values of the parameters. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1009.3657v1-abstract-full').style.display = 'none'; document.getElementById('1009.3657v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 September, 2010; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2010. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1005.2281">arXiv:1005.2281</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1005.2281">pdf</a>, <a href="https://arxiv.org/format/1005.2281">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> </div> </div> <p class="title is-5 mathjax"> A new algebraic technique for polynomial-time computing the number modulo 2 of Hamiltonian decompositions and similar partitions of a graph&#39;s edge set </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Greg Cohen</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1005.2281v1-abstract-short" style="display: inline;"> In Graph Theory a number of results were devoted to studying the computational complexity of the number modulo 2 of a graph&#39;s edge set decompositions of various kinds, first of all including its Hamiltonian decompositions, as well as the number modulo 2 of, say, Hamiltonian cycles/paths etc. 
While the problems of finding a Hamiltonian decomposition and Hamiltonian cycle are NP-complete, counting t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1005.2281v1-abstract-full').style.display = 'inline'; document.getElementById('1005.2281v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1005.2281v1-abstract-full" style="display: none;"> In Graph Theory a number of results were devoted to studying the computational complexity of the number modulo 2 of a graph&#39;s edge set decompositions of various kinds, first of all including its Hamiltonian decompositions, as well as the number modulo 2 of, say, Hamiltonian cycles/paths etc. While the problems of finding a Hamiltonian decomposition and Hamiltonian cycle are NP-complete, counting these objects modulo 2 in polynomial time is yet possible for certain types of regular undirected graphs. Some of the most known examples are the theorems about the existence of an even number of Hamiltonian decompositions in a 4-regular graph and an even number of such decompositions where two given edges e and g belong to different cycles (Thomason, 1978), as well as an even number of Hamiltonian cycles passing through any given edge in a regular odd-degreed graph (Smith&#39;s theorem). The present article introduces a new algebraic technique which generalizes the notion of counting modulo 2 via applying fields of Characteristic 2 and determinants and, for instance, allows to receive a polynomial-time formula for the number modulo 2 of a 4-regular bipartite graph&#39;s Hamiltonian decompositions such that a given edge and a given path of length 2 belong to different Hamiltonian cycles - hence refining/extending (in a computational sense) Thomason&#39;s result for bipartite graphs. 
This technique also provides a polynomial-time calculation of the number modulo 2 of a graph&#39;s edge set decompositions into simple cycles each containing at least one element of a given set of its edges what is a similar kind of extension of Thomason&#39;s theorem as well. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1005.2281v1-abstract-full').style.display = 'none'; document.getElementById('1005.2281v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 May, 2010; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2010. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">The present article introduces a new algebraic technique which generalizes the notion of counting modulo 2 via applying fields of Characteristic 2 and determinants</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> ----- </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1001.4992">arXiv:1001.4992</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1001.4992">pdf</a>, <a href="https://arxiv.org/ps/1001.4992">ps</a>, <a href="https://arxiv.org/format/1001.4992">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/WIFS.2009.5386458">10.1109/WIFS.2009.5386458 <i class="fa fa-external-link" 
aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> RFID Key Establishment Against Active Adversaries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bringer%2C+J">Julien Bringer</a>, <a href="/search/cs?searchtype=author&amp;query=Chabanne%2C+H">Hervé Chabanne</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gérard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Kindarji%2C+B">Bruno Kindarji</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1001.4992v2-abstract-short" style="display: inline;"> We present a method to strengthen a very low cost solution for key agreement with a RFID device. Starting from a work which exploits the inherent noise on the communication link to establish a key by public discussion, we show how to protect this agreement against active adversaries. For that purpose, we unravel integrity $(I)$-codes suggested by Cagalj et al. No preliminary key distribution&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1001.4992v2-abstract-full').style.display = 'inline'; document.getElementById('1001.4992v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1001.4992v2-abstract-full" style="display: none;"> We present a method to strengthen a very low cost solution for key agreement with a RFID device. Starting from a work which exploits the inherent noise on the communication link to establish a key by public discussion, we show how to protect this agreement against active adversaries. For that purpose, we unravel integrity $(I)$-codes suggested by Cagalj et al. No preliminary key distribution is required. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1001.4992v2-abstract-full').style.display = 'none'; document.getElementById('1001.4992v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 January, 2010; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 January, 2010; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2010. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work was presented at the First IEEE Workshop on Information Forensics and Security (WIFS&#39;09) (update including minor remarks and references to match the presented version)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1001.2463">arXiv:1001.2463</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1001.2463">pdf</a>, <a href="https://arxiv.org/ps/1001.2463">ps</a>, <a href="https://arxiv.org/format/1001.2463">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> </div> </div> <p class="title is-5 mathjax"> On the Threshold of Maximum-Distance Separable Codes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kindarji%2C+B">Bruno Kindarji</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gérard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Chabanne%2C+H">Hervé Chabanne</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis 
has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1001.2463v1-abstract-short" style="display: inline;"> Starting from a practical use of Reed-Solomon codes in a cryptographic scheme published in Indocrypt&#39;09, this paper deals with the threshold of linear $q$-ary error-correcting codes. The security of this scheme is based on the intractability of polynomial reconstruction when there is too much noise in the vector. Our approach switches from this paradigm to an Information Theoretical point of vie&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1001.2463v1-abstract-full').style.display = 'inline'; document.getElementById('1001.2463v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1001.2463v1-abstract-full" style="display: none;"> Starting from a practical use of Reed-Solomon codes in a cryptographic scheme published in Indocrypt&#39;09, this paper deals with the threshold of linear $q$-ary error-correcting codes. The security of this scheme is based on the intractability of polynomial reconstruction when there is too much noise in the vector. Our approach switches from this paradigm to an Information Theoretical point of view: is there a class of elements that are so far away from the code that the list size is always superpolynomial? Or, dually speaking, is Maximum-Likelihood decoding almost surely impossible? We relate this issue to the decoding threshold of a code, and show that when the minimal distance of the code is high enough, the threshold effect is very sharp. In a second part, we explicit lower-bounds on the threshold of Maximum-Distance Separable codes such as Reed-Solomon codes, and compute the threshold for the toy example that motivates this study. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1001.2463v1-abstract-full').style.display = 'none'; document.getElementById('1001.2463v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 January, 2010; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2010. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Sumitted to ISIT 2010</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/0809.1522">arXiv:0809.1522</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/0809.1522">pdf</a>, <a href="https://arxiv.org/ps/0809.1522">ps</a>, <a href="https://arxiv.org/format/0809.1522">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> On the permutation capacity of digraphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gerard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Fachini%2C+E">Emanuela Fachini</a>, <a href="/search/cs?searchtype=author&amp;query=Korner%2C+J">Janos Korner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="0809.1522v1-abstract-short" style="display: inline;"> We extend several results of the third author and C. 
Malvenuto on graph-different permutations to the case of directed graphs and introduce new open problems. Permutation capacity is a natural extension of Sperner capacity from finite directed graphs to infinite digraphs. Our subject is combinatorial in nature, but can be equally regarded as zero-error information theory. </span> <span class="abstract-full has-text-grey-dark mathjax" id="0809.1522v1-abstract-full" style="display: none;"> We extend several results of the third author and C. Malvenuto on graph-different permutations to the case of directed graphs and introduce new open problems. Permutation capacity is a natural extension of Sperner capacity from finite directed graphs to infinite digraphs. Our subject is combinatorial in nature, but can be equally regarded as zero-error information theory. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0809.1522v1-abstract-full').style.display = 'none'; document.getElementById('0809.1522v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 September, 2008; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2008. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, no figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 05D05; 05C69: 05C69; 94A24 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/0705.3740">arXiv:0705.3740</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/0705.3740">pdf</a>, <a href="https://arxiv.org/ps/0705.3740">ps</a>, <a href="https://arxiv.org/format/0705.3740">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/BTAS.2007.4401904">10.1109/BTAS.2007.4401904 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Optimal Iris Fuzzy Sketches </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bringer%2C+J">J. Bringer</a>, <a href="/search/cs?searchtype=author&amp;query=Chabanne%2C+H">H. Chabanne</a>, <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">G. Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Kindarji%2C+B">B. Kindarji</a>, <a href="/search/cs?searchtype=author&amp;query=Z%C3%A9mor%2C+G">G. 
Zémor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="0705.3740v1-abstract-short" style="display: inline;"> Fuzzy sketches, introduced as a link between biometry and cryptography, are a way of handling biometric data matching as an error correction issue. We focus here on iris biometrics and look for the best error-correcting code in that respect. We show that two-dimensional iterative min-sum decoding leads to results near the theoretical limits. In particular, we experiment our techniques on the Iri&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0705.3740v1-abstract-full').style.display = 'inline'; document.getElementById('0705.3740v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="0705.3740v1-abstract-full" style="display: none;"> Fuzzy sketches, introduced as a link between biometry and cryptography, are a way of handling biometric data matching as an error correction issue. We focus here on iris biometrics and look for the best error-correcting code in that respect. We show that two-dimensional iterative min-sum decoding leads to results near the theoretical limits. In particular, we experiment our techniques on the Iris Challenge Evaluation (ICE) database and validate our findings. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('0705.3740v1-abstract-full').style.display = 'none'; document.getElementById('0705.3740v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2007; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2007. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages. Submitted to the IEEE Conference on Biometrics: Theory, Applications and Systems, 2007 Washington DC</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> E.3; E.4 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Biometrics: Theory, Applications, and Systems, 2007. BTAS 2007. First IEEE International Conference on </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/cs/0507015">arXiv:cs/0507015</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/cs/0507015">pdf</a>, <a href="https://arxiv.org/ps/cs/0507015">ps</a>, <a href="https://arxiv.org/format/cs/0507015">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> </div> </div> <p class="title is-5 mathjax"> Duality between Packings and Coverings of the Hamming Space </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cohen%2C+G">Gérard Cohen</a>, <a href="/search/cs?searchtype=author&amp;query=Vardy%2C+A">Alexander Vardy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="cs/0507015v1-abstract-short" style="display: inline;"> We investigate the packing and covering densities of linear and nonlinear binary codes, and establish a number of duality relationships between the packing and covering problems. 
Specifically, we prove that if almost all codes (in the class of linear or nonlinear codes) are good packings, then only a vanishing fraction of codes are good coverings, and vice versa: if almost all codes are good cov&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('cs/0507015v1-abstract-full').style.display = 'inline'; document.getElementById('cs/0507015v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="cs/0507015v1-abstract-full" style="display: none;"> We investigate the packing and covering densities of linear and nonlinear binary codes, and establish a number of duality relationships between the packing and covering problems. Specifically, we prove that if almost all codes (in the class of linear or nonlinear codes) are good packings, then only a vanishing fraction of codes are good coverings, and vice versa: if almost all codes are good coverings, then at most a vanishing fraction of codes are good packings. We also show that any specific maximal binary code is either a good packing or a good covering, in a certain well-defined sense. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('cs/0507015v1-abstract-full').style.display = 'none'; document.getElementById('cs/0507015v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 July, 2005; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2005. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> E.2; G.2.1 </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 
426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" 
class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10