CINXE.COM
Search | arXiv e-print repository
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–24 of 24 results for author: <span class="mathjax">Manica, M</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Manica%2C+M">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Manica, M"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Manica%2C+M&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Manica, M"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.18549">arXiv:2412.18549</a> <span> [<a href="https://arxiv.org/pdf/2412.18549">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Adaptation and Self-Organizing Systems">nlin.AO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Medical Physics">physics.med-ph</span> </div> </div> <p class="title is-5 mathjax"> Post-pandemic social contacts in Italy: implications for social distancing measures on in-person school and work attendance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Lucchini%2C+L">Lorenzo Lucchini</a>, <a href="/search/cs?searchtype=author&query=Marziano%2C+V">Valentina Marziano</a>, <a href="/search/cs?searchtype=author&query=Trentini%2C+F">Filippo Trentini</a>, <a href="/search/cs?searchtype=author&query=Chiavenna%2C+C">Chiara Chiavenna</a>, <a 
href="/search/cs?searchtype=author&query=D%27Agnese%2C+E">Elena D'Agnese</a>, <a href="/search/cs?searchtype=author&query=Offeddu%2C+V">Vittoria Offeddu</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Mattia Manica</a>, <a href="/search/cs?searchtype=author&query=Poletti%2C+P">Piero Poletti</a>, <a href="/search/cs?searchtype=author&query=Balsamo%2C+D">Duilio Balsamo</a>, <a href="/search/cs?searchtype=author&query=Guzzetta%2C+G">Giorgio Guzzetta</a>, <a href="/search/cs?searchtype=author&query=Aielli%2C+M">Marco Aielli</a>, <a href="/search/cs?searchtype=author&query=Melegaro%2C+A">Alessia Melegaro</a>, <a href="/search/cs?searchtype=author&query=Merler%2C+S">Stefano Merler</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.18549v1-abstract-short" style="display: inline;"> The collection of updated data on social contact patterns following the COVID-19 pandemic disruptions is crucial for future epidemiological assessments and evaluating non-pharmaceutical interventions (NPIs) based on physical distancing. We conducted two waves of an online survey in March 2022 and March 2023 in Italy, gathering data from a representative population sample on direct (verbal/physical… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.18549v1-abstract-full').style.display = 'inline'; document.getElementById('2412.18549v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2412.18549v1-abstract-full" style="display: none;"> The collection of updated data on social contact patterns following the COVID-19 pandemic disruptions is crucial for future epidemiological assessments and evaluating non-pharmaceutical interventions (NPIs) based on physical distancing. 
We conducted two waves of an online survey in March 2022 and March 2023 in Italy, gathering data from a representative population sample on direct (verbal/physical interactions) and indirect (prolonged co-location in indoor spaces) contacts. Using a generalized linear mixed model, we examined determinants of individuals' total social contacts and evaluated the potential impact of work-from-home and distance learning on the transmissibility of respiratory pathogens. In-person attendance at work or school emerged as a primary driver of social contacts. Adults attending in person reported a mean of 1.69 (95% CI: 1.56-1.84) times the contacts of those staying home; among children and adolescents, this ratio increased to 2.38 (95% CI: 1.98-2.87). We estimated that suspending all non-essential work alone would marginally reduce transmissibility. However, combining distance learning for all education levels with work-from-home policies could decrease transmissibility by up to 23.7% (95% CI: 18.2%-29.0%). Extending these measures to early childcare services would yield only minimal additional benefits. These results provide useful data for modelling the transmission of respiratory pathogens in Italy after the end of the COVID-19 emergency. They also provide insights into the potential epidemiological effectiveness of social distancing interventions targeting work and school attendance, supporting considerations on the balance between the expected benefits and their heavy societal costs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2412.18549v1-abstract-full').style.display = 'none'; document.getElementById('2412.18549v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 2 tables, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.13239">arXiv:2411.13239</a> <span> [<a href="https://arxiv.org/pdf/2411.13239">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Transforming the Hybrid Cloud for Emerging AI Workloads </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+D">Deming Chen</a>, <a href="/search/cs?searchtype=author&query=Youssef%2C+A">Alaa Youssef</a>, <a href="/search/cs?searchtype=author&query=Pendse%2C+R">Ruchi Pendse</a>, <a href="/search/cs?searchtype=author&query=Schleife%2C+A">André Schleife</a>, <a href="/search/cs?searchtype=author&query=Clark%2C+B+K">Bryan K. 
Clark</a>, <a href="/search/cs?searchtype=author&query=Hamann%2C+H">Hendrik Hamann</a>, <a href="/search/cs?searchtype=author&query=He%2C+J">Jingrui He</a>, <a href="/search/cs?searchtype=author&query=Laino%2C+T">Teodoro Laino</a>, <a href="/search/cs?searchtype=author&query=Varshney%2C+L">Lav Varshney</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yuxiong Wang</a>, <a href="/search/cs?searchtype=author&query=Sil%2C+A">Avirup Sil</a>, <a href="/search/cs?searchtype=author&query=Jabbarvand%2C+R">Reyhaneh Jabbarvand</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+T">Tianyin Xu</a>, <a href="/search/cs?searchtype=author&query=Kindratenko%2C+V">Volodymyr Kindratenko</a>, <a href="/search/cs?searchtype=author&query=Costa%2C+C">Carlos Costa</a>, <a href="/search/cs?searchtype=author&query=Adve%2C+S">Sarita Adve</a>, <a href="/search/cs?searchtype=author&query=Mendis%2C+C">Charith Mendis</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+M">Minjia Zhang</a>, <a href="/search/cs?searchtype=author&query=N%C3%BA%C3%B1ez-Corrales%2C+S">Santiago Núñez-Corrales</a>, <a href="/search/cs?searchtype=author&query=Ganti%2C+R">Raghu Ganti</a>, <a href="/search/cs?searchtype=author&query=Srivatsa%2C+M">Mudhakar Srivatsa</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+N+S">Nam Sung Kim</a>, <a href="/search/cs?searchtype=author&query=Torrellas%2C+J">Josep Torrellas</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+J">Jian Huang</a>, <a href="/search/cs?searchtype=author&query=Seelam%2C+S">Seetharami Seelam</a> , et al. 
(19 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.13239v1-abstract-short" style="display: inline;"> This white paper, developed through close collaboration between IBM Research and UIUC researchers within the IIDAI Institute, envisions transforming hybrid cloud systems to meet the growing complexity of AI workloads through innovative, full-stack co-design approaches, emphasizing usability, manageability, affordability, adaptability, efficiency, and scalability. By integrating cutting-edge techno… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13239v1-abstract-full').style.display = 'inline'; document.getElementById('2411.13239v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.13239v1-abstract-full" style="display: none;"> This white paper, developed through close collaboration between IBM Research and UIUC researchers within the IIDAI Institute, envisions transforming hybrid cloud systems to meet the growing complexity of AI workloads through innovative, full-stack co-design approaches, emphasizing usability, manageability, affordability, adaptability, efficiency, and scalability. By integrating cutting-edge technologies such as generative and agentic AI, cross-layer automation and optimization, unified control plane, and composable and adaptive system architecture, the proposed framework addresses critical challenges in energy efficiency, performance, and cost-effectiveness. Incorporating quantum computing as it matures will enable quantum-accelerated simulations for materials science, climate modeling, and other high-impact domains. 
Collaborative efforts between academia and industry are central to this vision, driving advancements in foundation models for material design and climate solutions, scalable multimodal data processing, and enhanced physics-based AI emulators for applications like weather forecasting and carbon sequestration. Research priorities include advancing AI agentic systems, LLM as an Abstraction (LLMaaA), AI model optimization and unified abstractions across heterogeneous infrastructure, end-to-end edge-cloud transformation, efficient programming model, middleware and platform, secure infrastructure, application-adaptive cloud systems, and new quantum-classical collaborative workflows. These ideas and solutions encompass both theoretical and practical research questions, requiring coordinated input and support from the research community. This joint initiative aims to establish hybrid clouds as secure, efficient, and sustainable platforms, fostering breakthroughs in AI-driven applications and scientific discovery across academia, industry, and society. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.13239v1-abstract-full').style.display = 'none'; document.getElementById('2411.13239v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">70 pages, 27 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.12586">arXiv:2301.12586</a> <span> [<a href="https://arxiv.org/pdf/2301.12586">pdf</a>, <a href="https://arxiv.org/format/2301.12586">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Unifying Molecular and Textual Representations via Multi-task Language Modelling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Christofidellis%2C+D">Dimitrios Christofidellis</a>, <a href="/search/cs?searchtype=author&query=Giannone%2C+G">Giorgio Giannone</a>, <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Winther%2C+O">Ole Winther</a>, <a href="/search/cs?searchtype=author&query=Laino%2C+T">Teodoro Laino</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.12586v2-abstract-short" style="display: inline;"> The recent advances in neural language models have also been successfully applied to the field of chemistry, offering generative solutions for classical problems in molecular design and synthesis planning. These new methods have the potential to fuel a new era of data-driven automation in scientific discovery. 
However, specialized models are still typically required for each task, leading to the n… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.12586v2-abstract-full').style.display = 'inline'; document.getElementById('2301.12586v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.12586v2-abstract-full" style="display: none;"> The recent advances in neural language models have also been successfully applied to the field of chemistry, offering generative solutions for classical problems in molecular design and synthesis planning. These new methods have the potential to fuel a new era of data-driven automation in scientific discovery. However, specialized models are still typically required for each task, leading to the need for problem-specific fine-tuning and neglecting task interrelations. The main obstacle in this field is the lack of a unified representation between natural language and chemical representations, complicating and limiting human-machine interaction. Here, we propose the first multi-domain, multi-task language model that can solve a wide range of tasks in both the chemical and natural language domains. Our model can handle chemical and natural language concurrently, without requiring expensive pre-training on single domains or task-specific models. Interestingly, sharing weights across domains remarkably improves our model when benchmarked against state-of-the-art baselines on single-domain and cross-domain tasks. In particular, sharing information across domains and tasks gives rise to large improvements in cross-domain tasks, the magnitude of which increase with scale, as measured by more than a dozen of relevant metrics. Our work suggests that such models can robustly and efficiently accelerate discovery in physical sciences by superseding problem-specific fine-tuning and enhancing human-model interactions. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.12586v2-abstract-full').style.display = 'none'; document.getElementById('2301.12586v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICML 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.08750">arXiv:2301.08750</a> <span> [<a href="https://arxiv.org/pdf/2301.08750">pdf</a>, <a href="https://arxiv.org/format/2301.08750">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> Domain-agnostic and Multi-level Evaluation of Generative Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tadesse%2C+G+A">Girmaw Abebe Tadesse</a>, <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Cintas%2C+C">Celia Cintas</a>, <a href="/search/cs?searchtype=author&query=Ogallo%2C+W">William Ogallo</a>, <a href="/search/cs?searchtype=author&query=Zubarev%2C+D">Dmitry Zubarev</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo 
Manica</a>, <a href="/search/cs?searchtype=author&query=Weldemariam%2C+K">Komminist Weldemariam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.08750v1-abstract-short" style="display: inline;"> While the capabilities of generative models heavily improved in different domains (images, text, graphs, molecules, etc.), their evaluation metrics largely remain based on simplified quantities or manual inspection with limited practicality. To this end, we propose a framework for Multi-level Performance Evaluation of Generative mOdels (MPEGO), which could be employed across different domains. MPE… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.08750v1-abstract-full').style.display = 'inline'; document.getElementById('2301.08750v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.08750v1-abstract-full" style="display: none;"> While the capabilities of generative models heavily improved in different domains (images, text, graphs, molecules, etc.), their evaluation metrics largely remain based on simplified quantities or manual inspection with limited practicality. To this end, we propose a framework for Multi-level Performance Evaluation of Generative mOdels (MPEGO), which could be employed across different domains. MPEGO aims to quantify generation performance hierarchically, starting from a sub-feature-based low-level evaluation to a global features-based high-level evaluation. MPEGO offers great customizability as the employed features are entirely user-driven and can thus be highly domain/problem-specific while being arbitrarily complex (e.g., outcomes of experimental procedures). We validate MPEGO using multiple generative models across several datasets from the material discovery domain. 
An ablation study is conducted to study the plausibility of intermediate steps in MPEGO. Results demonstrate that MPEGO provides a flexible, user-driven, and multi-level evaluation framework, with practical insights on the generation quality. The framework, source code, and experiments will be available at https://github.com/GT4SD/mpego. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.08750v1-abstract-full').style.display = 'none'; document.getElementById('2301.08750v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.05100">arXiv:2211.05100</a> <span> [<a href="https://arxiv.org/pdf/2211.05100">pdf</a>, <a href="https://arxiv.org/format/2211.05100">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> BLOOM: A 176B-Parameter Open-Access Multilingual Language Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Workshop%2C+B">BigScience Workshop</a>, <a href="/search/cs?searchtype=author&query=%3A"> :</a>, <a href="/search/cs?searchtype=author&query=Scao%2C+T+L">Teven Le Scao</a>, <a href="/search/cs?searchtype=author&query=Fan%2C+A">Angela Fan</a>, <a href="/search/cs?searchtype=author&query=Akiki%2C+C">Christopher Akiki</a>, <a href="/search/cs?searchtype=author&query=Pavlick%2C+E">Ellie Pavlick</a>, <a href="/search/cs?searchtype=author&query=Ili%C4%87%2C+S">Suzana Ilić</a>, <a href="/search/cs?searchtype=author&query=Hesslow%2C+D">Daniel 
Hesslow</a>, <a href="/search/cs?searchtype=author&query=Castagn%C3%A9%2C+R">Roman Castagné</a>, <a href="/search/cs?searchtype=author&query=Luccioni%2C+A+S">Alexandra Sasha Luccioni</a>, <a href="/search/cs?searchtype=author&query=Yvon%2C+F">François Yvon</a>, <a href="/search/cs?searchtype=author&query=Gall%C3%A9%2C+M">Matthias Gallé</a>, <a href="/search/cs?searchtype=author&query=Tow%2C+J">Jonathan Tow</a>, <a href="/search/cs?searchtype=author&query=Rush%2C+A+M">Alexander M. Rush</a>, <a href="/search/cs?searchtype=author&query=Biderman%2C+S">Stella Biderman</a>, <a href="/search/cs?searchtype=author&query=Webson%2C+A">Albert Webson</a>, <a href="/search/cs?searchtype=author&query=Ammanamanchi%2C+P+S">Pawan Sasanka Ammanamanchi</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Thomas Wang</a>, <a href="/search/cs?searchtype=author&query=Sagot%2C+B">Benoît Sagot</a>, <a href="/search/cs?searchtype=author&query=Muennighoff%2C+N">Niklas Muennighoff</a>, <a href="/search/cs?searchtype=author&query=del+Moral%2C+A+V">Albert Villanova del Moral</a>, <a href="/search/cs?searchtype=author&query=Ruwase%2C+O">Olatunji Ruwase</a>, <a href="/search/cs?searchtype=author&query=Bawden%2C+R">Rachel Bawden</a>, <a href="/search/cs?searchtype=author&query=Bekman%2C+S">Stas Bekman</a>, <a href="/search/cs?searchtype=author&query=McMillan-Major%2C+A">Angelina McMillan-Major</a> , et al. (369 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.05100v4-abstract-short" style="display: inline;"> Large language models (LLMs) have been shown to be able to perform new tasks based on a few demonstrations or natural language instructions. While these capabilities have led to widespread adoption, most LLMs are developed by resource-rich organizations and are frequently kept from the public. 
As a step towards democratizing this powerful technology, we present BLOOM, a 176B-parameter open-access… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05100v4-abstract-full').style.display = 'inline'; document.getElementById('2211.05100v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.05100v4-abstract-full" style="display: none;"> Large language models (LLMs) have been shown to be able to perform new tasks based on a few demonstrations or natural language instructions. While these capabilities have led to widespread adoption, most LLMs are developed by resource-rich organizations and are frequently kept from the public. As a step towards democratizing this powerful technology, we present BLOOM, a 176B-parameter open-access language model designed and built thanks to a collaboration of hundreds of researchers. BLOOM is a decoder-only Transformer language model that was trained on the ROOTS corpus, a dataset comprising hundreds of sources in 46 natural and 13 programming languages (59 in total). We find that BLOOM achieves competitive performance on a wide variety of benchmarks, with stronger results after undergoing multitask prompted finetuning. To facilitate future research and applications using LLMs, we publicly release our models and code under the Responsible AI License. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05100v4-abstract-full').style.display = 'none'; document.getElementById('2211.05100v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.07084">arXiv:2208.07084</a> <span> [<a href="https://arxiv.org/pdf/2208.07084">pdf</a>, <a href="https://arxiv.org/format/2208.07084">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Z-BERT-A: a zero-shot Pipeline for Unknown Intent detection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Comi%2C+D">Daniele Comi</a>, <a href="/search/cs?searchtype=author&query=Christofidellis%2C+D">Dimitrios Christofidellis</a>, <a href="/search/cs?searchtype=author&query=Piazza%2C+P+F">Pier Francesco Piazza</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.07084v3-abstract-short" style="display: inline;"> Intent discovery is a crucial task in natural language processing, and it is increasingly relevant for various of industrial applications. Identifying novel, unseen intents from user inputs remains one of the biggest challenges in this field. 
Herein, we propose Zero-Shot-BERT-Adapters, a two-stage method for multilingual intent discovery relying on a Transformer architecture, fine-tuned with Adapt… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.07084v3-abstract-full').style.display = 'inline'; document.getElementById('2208.07084v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.07084v3-abstract-full" style="display: none;"> Intent discovery is a crucial task in natural language processing, and it is increasingly relevant for various of industrial applications. Identifying novel, unseen intents from user inputs remains one of the biggest challenges in this field. Herein, we propose Zero-Shot-BERT-Adapters, a two-stage method for multilingual intent discovery relying on a Transformer architecture, fine-tuned with Adapters. We train the model for Natural Language Inference (NLI) and later perform unknown intent classification in a zero-shot setting for multiple languages. In our evaluation, we first analyze the quality of the model after adaptive fine-tuning on known classes. Secondly, we evaluate its performance in casting intent classification as an NLI task. Lastly, we test the zero-shot performance of the model on unseen classes, showing how Zero-Shot-BERT-Adapters can effectively perform intent discovery by generating semantically similar intents, if not equal, to the ground-truth ones. Our experiments show how Zero-Shot-BERT-Adapters outperforms various baselines in two zero-shot settings: known intent classification and unseen intent discovery. The proposed pipeline holds the potential for broad application in customer care. It enables automated dynamic triage using a lightweight model that can be easily deployed and scaled in various business scenarios, unlike large language models. 
Zero-Shot-BERT-Adapters represents an innovative multi-language approach for intent discovery, enabling the online generation of novel intents. A Python package implementing the pipeline and the new datasets we compiled are available at the following link: https://github.com/GT4SD/zero-shot-bert-adapters. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.07084v3-abstract-full').style.display = 'none'; document.getElementById('2208.07084v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 4 figures, 14 tables, https://github.com/GT4SD/zberta</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.03928">arXiv:2207.03928</a> <span> [<a href="https://arxiv.org/pdf/2207.03928">pdf</a>, <a href="https://arxiv.org/format/2207.03928">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" 
href="https://doi.org/10.1038/s41524-023-01028-1">10.1038/s41524-023-01028-1 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Accelerating Material Design with the Generative Toolkit for Scientific Discovery </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Cadow%2C+J">Joris Cadow</a>, <a href="/search/cs?searchtype=author&query=Christofidellis%2C+D">Dimitrios Christofidellis</a>, <a href="/search/cs?searchtype=author&query=Dave%2C+A">Ashish Dave</a>, <a href="/search/cs?searchtype=author&query=Clarke%2C+D">Dean Clarke</a>, <a href="/search/cs?searchtype=author&query=Teukam%2C+Y+G+N">Yves Gaetan Nana Teukam</a>, <a href="/search/cs?searchtype=author&query=Giannone%2C+G">Giorgio Giannone</a>, <a href="/search/cs?searchtype=author&query=Hoffman%2C+S+C">Samuel C. 
Hoffman</a>, <a href="/search/cs?searchtype=author&query=Buchan%2C+M">Matthew Buchan</a>, <a href="/search/cs?searchtype=author&query=Chenthamarakshan%2C+V">Vijil Chenthamarakshan</a>, <a href="/search/cs?searchtype=author&query=Donovan%2C+T">Timothy Donovan</a>, <a href="/search/cs?searchtype=author&query=Hsu%2C+H+H">Hsiang Han Hsu</a>, <a href="/search/cs?searchtype=author&query=Zipoli%2C+F">Federico Zipoli</a>, <a href="/search/cs?searchtype=author&query=Schilter%2C+O">Oliver Schilter</a>, <a href="/search/cs?searchtype=author&query=Kishimoto%2C+A">Akihiro Kishimoto</a>, <a href="/search/cs?searchtype=author&query=Hamada%2C+L">Lisa Hamada</a>, <a href="/search/cs?searchtype=author&query=Padhi%2C+I">Inkit Padhi</a>, <a href="/search/cs?searchtype=author&query=Wehden%2C+K">Karl Wehden</a>, <a href="/search/cs?searchtype=author&query=McHugh%2C+L">Lauren McHugh</a>, <a href="/search/cs?searchtype=author&query=Khrabrov%2C+A">Alexy Khrabrov</a>, <a href="/search/cs?searchtype=author&query=Das%2C+P">Payel Das</a>, <a href="/search/cs?searchtype=author&query=Takeda%2C+S">Seiji Takeda</a>, <a href="/search/cs?searchtype=author&query=Smith%2C+J+R">John R. Smith</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.03928v4-abstract-short" style="display: inline;"> With the growing availability of data within various scientific domains, generative models hold enormous potential to accelerate scientific discovery. They harness powerful representations learned from datasets to speed up the formulation of novel hypotheses with the potential to impact material discovery broadly. We present the Generative Toolkit for Scientific Discovery (GT4SD). 
This extensible… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.03928v4-abstract-full').style.display = 'inline'; document.getElementById('2207.03928v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.03928v4-abstract-full" style="display: none;"> With the growing availability of data within various scientific domains, generative models hold enormous potential to accelerate scientific discovery. They harness powerful representations learned from datasets to speed up the formulation of novel hypotheses with the potential to impact material discovery broadly. We present the Generative Toolkit for Scientific Discovery (GT4SD). This extensible open-source library enables scientists, developers, and researchers to train and use state-of-the-art generative models to accelerate scientific discovery focused on material design. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.03928v4-abstract-full').style.display = 'none'; document.getElementById('2207.03928v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 2 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature Partner Journals (npj) Computational Materials 9, 69 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.01338">arXiv:2202.01338</a> <span> [<a href="https://arxiv.org/pdf/2202.01338">pdf</a>, <a href="https://arxiv.org/format/2202.01338">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s42256-023-00639-z">10.1038/s42256-023-00639-z <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Regression Transformer: Concurrent sequence regression and generation for molecular language modeling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2202.01338v3-abstract-short" style="display: inline;"> Despite significant progress of generative models in the natural sciences, their controllability remains challenging. One fundamentally missing aspect of molecular or protein generative models is an inductive bias that can reflect continuous properties of interest. To that end, we propose the Regression Transformer (RT), a novel method that abstracts regression as a conditional sequence modeling p… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.01338v3-abstract-full').style.display = 'inline'; document.getElementById('2202.01338v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.01338v3-abstract-full" style="display: none;"> Despite significant progress of generative models in the natural sciences, their controllability remains challenging. One fundamentally missing aspect of molecular or protein generative models is an inductive bias that can reflect continuous properties of interest. To that end, we propose the Regression Transformer (RT), a novel method that abstracts regression as a conditional sequence modeling problem. This introduces a new paradigm of multitask language models which seamlessly bridge sequence regression and conditional sequence generation. We thoroughly demonstrate that, despite using a nominal-scale training objective, the RT matches or surpasses the performance of conventional regression models in property prediction tasks of small molecules, proteins and chemical reactions. Critically, priming the same model with continuous properties yields a highly competitive conditional generative model that outperforms specialized approaches in a substructure-constrained, property-driven molecule generation benchmark. 
Our dichotomous approach is facilitated by a novel, alternating training scheme that enables the model to decorate seed sequences by desired properties, e.g., to optimize reaction yield. In sum, the RT is the first report of a multitask model that concurrently excels at predictive and generative tasks in biochemistry. This finds particular application in property-driven, local exploration of the chemical or protein space and could pave the road toward foundation models in material design. The code to reproduce all experiments of the paper is available at: https://github.com/IBM/regression-transformer <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.01338v3-abstract-full').style.display = 'none'; document.getElementById('2202.01338v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Updated paper, under review; Preliminary version as spotlight talk at ICLR 2022 workshop on Machine Learning for Drug Discovery</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature Machine Intelligence 5, 432-444 (2023) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.05654">arXiv:2111.05654</a> <span> [<a href="https://arxiv.org/pdf/2111.05654">pdf</a>, <a href="https://arxiv.org/format/2111.05654">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Utilising urgent computing to tackle the spread of mosquito-borne diseases </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Brown%2C+N">Nick Brown</a>, <a href="/search/cs?searchtype=author&query=Nash%2C+R">Rupert Nash</a>, <a href="/search/cs?searchtype=author&query=Poletti%2C+P">Piero Poletti</a>, <a href="/search/cs?searchtype=author&query=Guzzetta%2C+G">Giorgio Guzzetta</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Mattia Manica</a>, <a href="/search/cs?searchtype=author&query=Zardini%2C+A">Agnese Zardini</a>, <a href="/search/cs?searchtype=author&query=Flatken%2C+M">Markus Flatken</a>, <a href="/search/cs?searchtype=author&query=Vidal%2C+J">Jules Vidal</a>, <a href="/search/cs?searchtype=author&query=Gueunet%2C+C">Charles Gueunet</a>, <a href="/search/cs?searchtype=author&query=Belikov%2C+E">Evgenij Belikov</a>, <a href="/search/cs?searchtype=author&query=Tierny%2C+J">Julien Tierny</a>, <a 
href="/search/cs?searchtype=author&query=Podobas%2C+A">Artur Podobas</a>, <a href="/search/cs?searchtype=author&query=Der+Chien%2C+W">Wei Der Chien</a>, <a href="/search/cs?searchtype=author&query=Markidis%2C+S">Stefano Markidis</a>, <a href="/search/cs?searchtype=author&query=Gerndt%2C+A">Andreas Gerndt</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.05654v1-abstract-short" style="display: inline;"> It is estimated that around 80% of the world's population live in areas susceptible to at-least one major vector borne disease, and approximately 20% of global communicable diseases are spread by mosquitoes. Furthermore, the outbreaks of such diseases are becoming more common and widespread, with much of this driven in recent years by socio-demographic and climatic factors. These trends are causi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.05654v1-abstract-full').style.display = 'inline'; document.getElementById('2111.05654v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.05654v1-abstract-full" style="display: none;"> It is estimated that around 80% of the world's population live in areas susceptible to at-least one major vector borne disease, and approximately 20% of global communicable diseases are spread by mosquitoes. Furthermore, the outbreaks of such diseases are becoming more common and widespread, with much of this driven in recent years by socio-demographic and climatic factors. These trends are causing significant worry to global health organisations, including the CDC and WHO, and-so an important question is the role that technology can play in addressing them. 
In this work we describe the integration of an epidemiology model, which simulates the spread of mosquito-borne diseases, with the VESTEC urgent computing ecosystem. The intention of this work is to empower human health professionals to exploit this model and more easily explore the progression of mosquito-borne diseases. Traditionally in the domain of the few research scientists, by leveraging state of the art visualisation and analytics techniques, all supported by running the computational workloads on HPC machines in a seamless fashion, we demonstrate the significant advantages that such an integration can provide. Furthermore we demonstrate the benefits of using an ecosystem such as VESTEC, which provides a framework for urgent computing, in supporting the easy adoption of these technologies by the epidemiologists and disaster response professionals more widely. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.05654v1-abstract-full').style.display = 'none'; document.getElementById('2111.05654v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint of paper in 2021 IEEE/ACM HPC for Urgent Decision Making (UrgentHPC)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.08207">arXiv:2110.08207</a> <span> [<a href="https://arxiv.org/pdf/2110.08207">pdf</a>, <a href="https://arxiv.org/format/2110.08207">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Multitask Prompted Training Enables Zero-Shot Task Generalization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sanh%2C+V">Victor Sanh</a>, <a href="/search/cs?searchtype=author&query=Webson%2C+A">Albert Webson</a>, <a href="/search/cs?searchtype=author&query=Raffel%2C+C">Colin Raffel</a>, <a href="/search/cs?searchtype=author&query=Bach%2C+S+H">Stephen H. 
Bach</a>, <a href="/search/cs?searchtype=author&query=Sutawika%2C+L">Lintang Sutawika</a>, <a href="/search/cs?searchtype=author&query=Alyafeai%2C+Z">Zaid Alyafeai</a>, <a href="/search/cs?searchtype=author&query=Chaffin%2C+A">Antoine Chaffin</a>, <a href="/search/cs?searchtype=author&query=Stiegler%2C+A">Arnaud Stiegler</a>, <a href="/search/cs?searchtype=author&query=Scao%2C+T+L">Teven Le Scao</a>, <a href="/search/cs?searchtype=author&query=Raja%2C+A">Arun Raja</a>, <a href="/search/cs?searchtype=author&query=Dey%2C+M">Manan Dey</a>, <a href="/search/cs?searchtype=author&query=Bari%2C+M+S">M Saiful Bari</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+C">Canwen Xu</a>, <a href="/search/cs?searchtype=author&query=Thakker%2C+U">Urmish Thakker</a>, <a href="/search/cs?searchtype=author&query=Sharma%2C+S+S">Shanya Sharma Sharma</a>, <a href="/search/cs?searchtype=author&query=Szczechla%2C+E">Eliza Szczechla</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+T">Taewoon Kim</a>, <a href="/search/cs?searchtype=author&query=Chhablani%2C+G">Gunjan Chhablani</a>, <a href="/search/cs?searchtype=author&query=Nayak%2C+N">Nihal Nayak</a>, <a href="/search/cs?searchtype=author&query=Datta%2C+D">Debajyoti Datta</a>, <a href="/search/cs?searchtype=author&query=Chang%2C+J">Jonathan Chang</a>, <a href="/search/cs?searchtype=author&query=Jiang%2C+M+T">Mike Tian-Jian Jiang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+H">Han Wang</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Shen%2C+S">Sheng Shen</a> , et al. 
(16 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.08207v3-abstract-short" style="display: inline;"> Large language models have recently been shown to attain reasonable zero-shot generalization on a diverse set of tasks (Brown et al., 2020). It has been hypothesized that this is a consequence of implicit multitask learning in language models' pretraining (Radford et al., 2019). Can zero-shot generalization instead be directly induced by explicit multitask learning? To test this question at scale,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.08207v3-abstract-full').style.display = 'inline'; document.getElementById('2110.08207v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.08207v3-abstract-full" style="display: none;"> Large language models have recently been shown to attain reasonable zero-shot generalization on a diverse set of tasks (Brown et al., 2020). It has been hypothesized that this is a consequence of implicit multitask learning in language models' pretraining (Radford et al., 2019). Can zero-shot generalization instead be directly induced by explicit multitask learning? To test this question at scale, we develop a system for easily mapping any natural language tasks into a human-readable prompted form. We convert a large set of supervised datasets, each with multiple prompts with diverse wording. These prompted datasets allow for benchmarking the ability of a model to perform completely held-out tasks. We fine-tune a pretrained encoder-decoder model (Raffel et al., 2020; Lester et al., 2021) on this multitask mixture covering a wide variety of tasks. The model attains strong zero-shot performance on several standard datasets, often outperforming models up to 16x its size. 
Further, our approach attains strong performance on a subset of tasks from the BIG-bench benchmark, outperforming models up to 6x its size. All trained models are available at https://github.com/bigscience-workshop/t-zero and all prompts are available at https://github.com/bigscience-workshop/promptsource. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.08207v3-abstract-full').style.display = 'none'; document.getElementById('2110.08207v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICLR 2022 Spotlight (with extended discussion)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.10271">arXiv:2012.10271</a> <span> [<a href="https://arxiv.org/pdf/2012.10271">pdf</a>, <a href="https://arxiv.org/format/2012.10271">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Understood in Translation, Transformers for Domain Understanding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Christofidellis%2C+D">Dimitrios Christofidellis</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a 
href="/search/cs?searchtype=author&query=Georgopoulos%2C+L">Leonidas Georgopoulos</a>, <a href="/search/cs?searchtype=author&query=Vandierendonck%2C+H">Hans Vandierendonck</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.10271v1-abstract-short" style="display: inline;"> Knowledge acquisition is the essential first step of any Knowledge Graph (KG) application. This knowledge can be extracted from a given corpus (KG generation process) or specified from an existing KG (KG specification process). Focusing on domain specific solutions, knowledge acquisition is a labor intensive task usually orchestrated and supervised by subject matter experts. Specifically, the doma… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.10271v1-abstract-full').style.display = 'inline'; document.getElementById('2012.10271v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.10271v1-abstract-full" style="display: none;"> Knowledge acquisition is the essential first step of any Knowledge Graph (KG) application. This knowledge can be extracted from a given corpus (KG generation process) or specified from an existing KG (KG specification process). Focusing on domain specific solutions, knowledge acquisition is a labor intensive task usually orchestrated and supervised by subject matter experts. Specifically, the domain of interest is usually manually defined and then the needed generation or extraction tools are utilized to produce the KG. Herein, we propose a supervised machine learning method, based on Transformers, for domain definition of a corpus. We argue why such automated definition of the domain's structure is beneficial both in terms of construction time and quality of the generated graph. 
The proposed method is extensively validated on three public datasets (WebNLG, NYT and DocRED) by comparing it with two reference methods based on CNNs and RNNs models. The evaluation shows the efficiency of our model in this task. Focusing on scientific document understanding, we present a new health domain dataset based on publications extracted from PubMed and we successfully utilize our method on this. Lastly, we demonstrate how this work lays the foundation for fully automated and unsupervised KG generation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.10271v1-abstract-full').style.display = 'none'; document.getElementById('2012.10271v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 figures, 7 tables, main text pages 8, appendix pages 6</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2012.03084">arXiv:2012.03084</a> <span> [<a href="https://arxiv.org/pdf/2012.03084">pdf</a>, <a href="https://arxiv.org/format/2012.03084">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Pre-training Protein Language Models with Label-Agnostic Binding Pairs Enhances Performance in Downstream Tasks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&query=Filipavicius%2C+M">Modestas Filipavicius</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Cadow%2C+J">Joris Cadow</a>, <a href="/search/cs?searchtype=author&query=Martinez%2C+M+R">Maria Rodriguez Martinez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2012.03084v1-abstract-short" style="display: inline;"> Less than 1% of protein sequences are structurally and functionally annotated. Natural Language Processing (NLP) community has recently embraced self-supervised learning as a powerful approach to learn representations from unlabeled text, in large part due to the attention-based context-aware Transformer models. In this work we present a modification to the RoBERTa model by inputting during pre-tr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.03084v1-abstract-full').style.display = 'inline'; document.getElementById('2012.03084v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2012.03084v1-abstract-full" style="display: none;"> Less than 1% of protein sequences are structurally and functionally annotated. Natural Language Processing (NLP) community has recently embraced self-supervised learning as a powerful approach to learn representations from unlabeled text, in large part due to the attention-based context-aware Transformer models. In this work we present a modification to the RoBERTa model by inputting during pre-training a mixture of binding and non-binding protein sequences (from STRING database). However, the sequence pairs have no label to indicate their binding status, as the model relies solely on Masked Language Modeling (MLM) objective during pre-training. 
After fine-tuning, such approach surpasses models trained on single protein sequences for protein-protein binding prediction, TCR-epitope binding prediction, cellular-localization and remote homology classification tasks. We suggest that the Transformer's attention mechanism contributes to protein binding site discovery. Furthermore, we compress protein sequences by 64% with the Byte Pair Encoding (BPE) vocabulary consisting of 10K subwords, each around 3-4 amino acids long. Finally, to expand the model input space to even larger proteins and multi-protein assemblies, we pre-train Longformer models that support 2,048 tokens. Further work in token-level classification for secondary structure prediction is needed. Code available at: https://github.com/PaccMann/paccmann_proteomics <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2012.03084v1-abstract-full').style.display = 'none'; document.getElementById('2012.03084v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">20 pages, 12 figures, accepted to Machine Learning for Structural Biology (MLSB) workshop at the 34th Conference on Neural Information Processing Systems (NeurIPS)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.11152">arXiv:2009.11152</a> <span> [<a href="https://arxiv.org/pdf/2009.11152">pdf</a>, <a href="https://arxiv.org/format/2009.11152">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Hierarchical Pre-training for Sequence Labelling in Spoken Dialog </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chapuis%2C+E">Emile Chapuis</a>, <a href="/search/cs?searchtype=author&query=Colombo%2C+P">Pierre Colombo</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Labeau%2C+M">Matthieu Labeau</a>, <a href="/search/cs?searchtype=author&query=Clavel%2C+C">Chloe Clavel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.11152v3-abstract-short" style="display: inline;"> Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a key component of spoken dialog systems. 
In this work, we propose a new approach to learn generic representations adapted to spoken dialog, which we evaluate on a new benchmark we call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (\texttt{SILICONE}). \texttt{SILICONE} is model-agnostic and c… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.11152v3-abstract-full').style.display = 'inline'; document.getElementById('2009.11152v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.11152v3-abstract-full" style="display: none;"> Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a key component of spoken dialog systems. In this work, we propose a new approach to learn generic representations adapted to spoken dialog, which we evaluate on a new benchmark we call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (\texttt{SILICONE}). \texttt{SILICONE} is model-agnostic and contains 10 different datasets of various sizes. We obtain our representations with a hierarchical encoder based on transformer architectures, for which we extend two well-known pre-training objectives. Pre-training is performed on OpenSubtitles: a large corpus of spoken dialog containing over $2.3$ billion of tokens. We demonstrate how hierarchical encoders achieve competitive results with consistently fewer parameters compared to state-of-the-art models and we show their importance for both pre-training and fine-tuning. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.11152v3-abstract-full').style.display = 'none'; document.getElementById('2009.11152v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> EMNLP 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.13285">arXiv:2005.13285</a> <span> [<a href="https://arxiv.org/pdf/2005.13285">pdf</a>, <a href="https://arxiv.org/format/2005.13285">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> PaccMann$^{RL}$ on SARS-CoV-2: Designing antiviral candidates with conditional generative models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Cadow%2C+J">Joris Cadow</a>, <a href="/search/cs?searchtype=author&query=Markert%2C+G">Greta Markert</a>, <a href="/search/cs?searchtype=author&query=Mill%2C+N+A">Nil Adell Mill</a>, <a href="/search/cs?searchtype=author&query=Filipavicius%2C+M">Modestas 
Filipavicius</a>, <a href="/search/cs?searchtype=author&query=Mart%C3%ADnez%2C+M+R">María Rodríguez Martínez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.13285v3-abstract-short" style="display: inline;"> With the fast development of COVID-19 into a global pandemic, scientists around the globe are desperately searching for effective antiviral therapeutic agents. Bridging systems biology and drug discovery, we propose a deep learning framework for conditional de novo design of antiviral candidate drugs tailored against given protein targets. First, we train a multimodal ligand--protein binding affin… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.13285v3-abstract-full').style.display = 'inline'; document.getElementById('2005.13285v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.13285v3-abstract-full" style="display: none;"> With the fast development of COVID-19 into a global pandemic, scientists around the globe are desperately searching for effective antiviral therapeutic agents. Bridging systems biology and drug discovery, we propose a deep learning framework for conditional de novo design of antiviral candidate drugs tailored against given protein targets. First, we train a multimodal ligand--protein binding affinity model on predicting affinities of antiviral compounds to target proteins and couple this model with pharmacological toxicity predictors. Exploiting this multi-objective as a reward function of a conditional molecular generator (consisting of two VAEs), we showcase a framework that navigates the chemical space toward regions with more antiviral molecules. 
Specifically, we explore a challenging setting of generating ligands against unseen protein targets by performing a leave-one-out-cross-validation on 41 SARS-CoV-2-related target proteins. Using deep RL, it is demonstrated that in 35 out of 41 cases, the generation is biased towards sampling more binding ligands, with an average increase of 83% compared to an unbiased VAE. We present a case-study on a potential Envelope-protein inhibitor and perform a synthetic accessibility assessment of the best generated molecules that resembles a viable roadmap towards a rapid in-vitro evaluation of potential SARS-CoV-2 inhibitors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.13285v3-abstract-full').style.display = 'none'; document.getElementById('2005.13285v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 6 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> ICML Workshop on Computational Biology 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.01215">arXiv:2004.01215</a> <span> [<a href="https://arxiv.org/pdf/2004.01215">pdf</a>, <a href="https://arxiv.org/format/2004.01215">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> CogMol: Target-Specific and Selective Drug Design for COVID-19 Using Deep Generative Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chenthamarakshan%2C+V">Vijil Chenthamarakshan</a>, <a href="/search/cs?searchtype=author&query=Das%2C+P">Payel Das</a>, <a href="/search/cs?searchtype=author&query=Hoffman%2C+S+C">Samuel C. 
Hoffman</a>, <a href="/search/cs?searchtype=author&query=Strobelt%2C+H">Hendrik Strobelt</a>, <a href="/search/cs?searchtype=author&query=Padhi%2C+I">Inkit Padhi</a>, <a href="/search/cs?searchtype=author&query=Lim%2C+K+W">Kar Wai Lim</a>, <a href="/search/cs?searchtype=author&query=Hoover%2C+B">Benjamin Hoover</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Laino%2C+T">Teodoro Laino</a>, <a href="/search/cs?searchtype=author&query=Mojsilovic%2C+A">Aleksandra Mojsilovic</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.01215v2-abstract-short" style="display: inline;"> The novel nature of SARS-CoV-2 calls for the development of efficient de novo drug design approaches. In this study, we propose an end-to-end framework, named CogMol (Controlled Generation of Molecules), for designing new drug-like small molecules targeting novel viral proteins with high affinity and off-target selectivity. CogMol combines adaptive pre-training of a molecular SMILES Variational Au… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01215v2-abstract-full').style.display = 'inline'; document.getElementById('2004.01215v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.01215v2-abstract-full" style="display: none;"> The novel nature of SARS-CoV-2 calls for the development of efficient de novo drug design approaches. In this study, we propose an end-to-end framework, named CogMol (Controlled Generation of Molecules), for designing new drug-like small molecules targeting novel viral proteins with high affinity and off-target selectivity. 
CogMol combines adaptive pre-training of a molecular SMILES Variational Autoencoder (VAE) and an efficient multi-attribute controlled sampling scheme that uses guidance from attribute predictors trained on latent features. To generate novel and optimal drug-like molecules for unseen viral targets, CogMol leverages a protein-molecule binding affinity predictor that is trained using SMILES VAE embeddings and protein sequence embeddings learned unsupervised from a large corpus. CogMol framework is applied to three SARS-CoV-2 target proteins: main protease, receptor-binding domain of the spike protein, and non-structural protein 9 replicase. The generated candidates are novel at both molecular and chemical scaffold levels when compared to the training data. CogMol also includes insilico screening for assessing toxicity of parent molecules and their metabolites with a multi-task toxicity classifier, synthetic feasibility with a chemical retrosynthesis predictor, and target structure binding with docking simulations. Docking reveals favorable binding of generated molecules to the target protein structure, where 87-95 % of high affinity molecules showed docking free energy < -6 kcal/mol. When compared to approved drugs, the majority of designed compounds show low parent molecule and metabolite toxicity and high synthetic feasibility. In summary, CogMol handles multi-constraint design of synthesizable, low-toxic, drug-like molecules with high target specificity and selectivity, and does not need target-dependent fine-tuning of the framework or target structure information. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01215v2-abstract-full').style.display = 'none'; document.getElementById('2004.01215v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.09419">arXiv:2002.09419</a> <span> [<a href="https://arxiv.org/pdf/2002.09419">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Guider l'attention dans les modeles de sequence a sequence pour la prediction des actes de dialogue </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Colombo%2C+P">Pierre Colombo</a>, <a href="/search/cs?searchtype=author&query=Chapuis%2C+E">Emile Chapuis</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Vignon%2C+E">Emmanuel Vignon</a>, <a href="/search/cs?searchtype=author&query=Varni%2C+G">Giovanna Varni</a>, <a href="/search/cs?searchtype=author&query=Clavel%2C+C">Chloe Clavel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.09419v2-abstract-short" style="display: inline;"> The task of predicting dialog acts (DA) based on conversational dialog is a key component in the development of conversational agents. 
Accurately predicting DAs requires a precise modeling of both the conversation and the global tag dependencies. We leverage seq2seq approaches widely adopted in Neural Machine Translation (NMT) to improve the modelling of tag sequentiality. Seq2seq models are known… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.09419v2-abstract-full').style.display = 'inline'; document.getElementById('2002.09419v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.09419v2-abstract-full" style="display: none;"> The task of predicting dialog acts (DA) based on conversational dialog is a key component in the development of conversational agents. Accurately predicting DAs requires a precise modeling of both the conversation and the global tag dependencies. We leverage seq2seq approaches widely adopted in Neural Machine Translation (NMT) to improve the modelling of tag sequentiality. Seq2seq models are known to learn complex global dependencies while currently proposed approaches using linear conditional random fields (CRF) only model local tag dependencies. In this work, we introduce a seq2seq model tailored for DA classification using: a hierarchical encoder, a novel guided attention mechanism and beam search applied to both training and inference. Compared to the state of the art our model does not require handcrafted features and is trained end-to-end. Furthermore, the proposed approach achieves an unmatched accuracy score of 85% on SwDA, and state-of-the-art accuracy score of 91.6% on MRDA. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.09419v2-abstract-full').style.display = 'none'; document.getElementById('2002.09419v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 21 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">in French</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> WACAI 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2002.08801">arXiv:2002.08801</a> <span> [<a href="https://arxiv.org/pdf/2002.08801">pdf</a>, <a href="https://arxiv.org/format/2002.08801">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Guiding attention in Sequence-to-sequence models for Dialogue Act prediction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Colombo%2C+P">Pierre Colombo</a>, <a href="/search/cs?searchtype=author&query=Chapuis%2C+E">Emile Chapuis</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Vignon%2C+E">Emmanuel Vignon</a>, <a href="/search/cs?searchtype=author&query=Varni%2C+G">Giovanna Varni</a>, <a 
href="/search/cs?searchtype=author&query=Clavel%2C+C">Chloe Clavel</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.08801v2-abstract-short" style="display: inline;"> The task of predicting dialog acts (DA) based on conversational dialog is a key component in the development of conversational agents. Accurately predicting DAs requires a precise modeling of both the conversation and the global tag dependencies. We leverage seq2seq approaches widely adopted in Neural Machine Translation (NMT) to improve the modelling of tag sequentiality. Seq2seq models are known… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.08801v2-abstract-full').style.display = 'inline'; document.getElementById('2002.08801v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.08801v2-abstract-full" style="display: none;"> The task of predicting dialog acts (DA) based on conversational dialog is a key component in the development of conversational agents. Accurately predicting DAs requires a precise modeling of both the conversation and the global tag dependencies. We leverage seq2seq approaches widely adopted in Neural Machine Translation (NMT) to improve the modelling of tag sequentiality. Seq2seq models are known to learn complex global dependencies while currently proposed approaches using linear conditional random fields (CRF) only model local tag dependencies. In this work, we introduce a seq2seq model tailored for DA classification using: a hierarchical encoder, a novel guided attention mechanism and beam search applied to both training and inference. Compared to the state of the art our model does not require handcrafted features and is trained end-to-end. 
Furthermore, the proposed approach achieves an unmatched accuracy score of 85% on SwDA, and state-of-the-art accuracy score of 91.6% on MRDA. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.08801v2-abstract-full').style.display = 'none'; document.getElementById('2002.08801v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> AAAI 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1909.05114">arXiv:1909.05114</a> <span> [<a href="https://arxiv.org/pdf/1909.05114">pdf</a>, <a href="https://arxiv.org/format/1909.05114">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1007/978-3-030-45257-5_18">10.1007/978-3-030-45257-5_18 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> PaccMann$^{RL}$: Designing anticancer drugs from transcriptomic data via reinforcement learning </p> <p class="authors"> <span 
class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Oskooei%2C+A">Ali Oskooei</a>, <a href="/search/cs?searchtype=author&query=Cadow%2C+J">Joris Cadow</a>, <a href="/search/cs?searchtype=author&query=Borgwardt%2C+K">Karsten Borgwardt</a>, <a href="/search/cs?searchtype=author&query=Mart%C3%ADnez%2C+M+R">María Rodríguez Martínez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1909.05114v4-abstract-short" style="display: inline;"> With the advent of deep generative models in computational chemistry, in silico anticancer drug design has undergone an unprecedented transformation. While state-of-the-art deep learning approaches have shown potential in generating compounds with desired chemical properties, they disregard the genetic profile and properties of the target disease. Here, we introduce the first generative model capa… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.05114v4-abstract-full').style.display = 'inline'; document.getElementById('1909.05114v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1909.05114v4-abstract-full" style="display: none;"> With the advent of deep generative models in computational chemistry, in silico anticancer drug design has undergone an unprecedented transformation. While state-of-the-art deep learning approaches have shown potential in generating compounds with desired chemical properties, they disregard the genetic profile and properties of the target disease. Here, we introduce the first generative model capable of tailoring anticancer compounds for a specific biomolecular profile. 
Using a RL framework, the transcriptomic profiles of cancer cells are used as a context for the generation of candidate molecules. Our molecule generator combines two separately pretrained variational autoencoders (VAEs) - the first VAE encodes transcriptomic profiles into a smooth, latent space which in turn is used to condition a second VAE to generate novel molecular structures on the given transcriptomic profile. The generative process is optimized through PaccMann, a previously developed drug sensitivity prediction model to obtain effective anticancer compounds for the given context (i.e., transcriptomic profile). We demonstrate how the molecule generation can be biased towards compounds with high predicted inhibitory effect against individual cell lines or specific cancer sites. We verify our approach by investigating candidate drugs generated against specific cancer types and find the highest structural similarity to existing compounds with known efficacy against these cancer types. We envision our approach to transform in silico anticancer drug design by leveraging the biomolecular characteristics of the disease in order to increase success rates in lead compound discovery. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1909.05114v4-abstract-full').style.display = 'none'; document.getElementById('1909.05114v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 August, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages total (12 pages main text, 4 pages references, 11 pages appendix) 8 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> International Conference on Research in Computational Molecular Biology 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.08400">arXiv:1907.08400</a> <span> [<a href="https://arxiv.org/pdf/1907.08400">pdf</a>, <a href="https://arxiv.org/format/1907.08400">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> An Information Extraction and Knowledge Graph Platform for Accelerating Biochemical Discoveries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Auer%2C+C">Christoph Auer</a>, <a href="/search/cs?searchtype=author&query=Weber%2C+V">Valery Weber</a>, <a href="/search/cs?searchtype=author&query=Zipoli%2C+F">Federico Zipoli</a>, <a href="/search/cs?searchtype=author&query=Dolfi%2C+M">Michele Dolfi</a>, <a href="/search/cs?searchtype=author&query=Staar%2C+P">Peter Staar</a>, <a href="/search/cs?searchtype=author&query=Laino%2C+T">Teodoro Laino</a>, <a href="/search/cs?searchtype=author&query=Bekas%2C+C">Costas Bekas</a>, <a href="/search/cs?searchtype=author&query=Fujita%2C+A">Akihiro Fujita</a>, <a href="/search/cs?searchtype=author&query=Toda%2C+H">Hiroki Toda</a>, <a href="/search/cs?searchtype=author&query=Hirose%2C+S">Shuichi 
Hirose</a>, <a href="/search/cs?searchtype=author&query=Orii%2C+Y">Yasumitsu Orii</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.08400v1-abstract-short" style="display: inline;"> Information extraction and data mining in biochemical literature is a daunting task that demands resource-intensive computation and appropriate means to scale knowledge ingestion. Being able to leverage this immense source of technical information helps to drastically reduce costs and time to solution in multiple application fields from food safety to pharmaceutics. We present a scalable document… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08400v1-abstract-full').style.display = 'inline'; document.getElementById('1907.08400v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.08400v1-abstract-full" style="display: none;"> Information extraction and data mining in biochemical literature is a daunting task that demands resource-intensive computation and appropriate means to scale knowledge ingestion. Being able to leverage this immense source of technical information helps to drastically reduce costs and time to solution in multiple application fields from food safety to pharmaceutics. We present a scalable document ingestion system that integrates data from databases and publications (in PDF format) in a biochemistry knowledge graph (BCKG). The BCKG is a comprehensive source of knowledge that can be queried to retrieve known biochemical facts and to generate novel insights. After describing the knowledge ingestion framework, we showcase an application of our system in the field of carbohydrate enzymes. 
The BCKG represents a way to scale knowledge ingestion and automatically exploit prior knowledge to accelerate discovery in biochemical sciences. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08400v1-abstract-full').style.display = 'none'; document.getElementById('1907.08400v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 1 figure, Workshop on Applied Data Science for Healthcare at KDD, Anchorage, AK, 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1904.11223">arXiv:1904.11223</a> <span> [<a href="https://arxiv.org/pdf/1904.11223">pdf</a>, <a href="https://arxiv.org/format/1904.11223">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1021/acs.molpharmaceut.9b00520">10.1021/acs.molpharmaceut.9b00520 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title 
is-5 mathjax"> Towards Explainable Anticancer Compound Sensitivity Prediction via Multimodal Attention-based Convolutional Encoders </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Oskooei%2C+A">Ali Oskooei</a>, <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Subramanian%2C+V">Vigneshwari Subramanian</a>, <a href="/search/cs?searchtype=author&query=S%C3%A1ez-Rodr%C3%ADguez%2C+J">Julio Sáez-Rodríguez</a>, <a href="/search/cs?searchtype=author&query=Mart%C3%ADnez%2C+M+R">María Rodríguez Martínez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1904.11223v3-abstract-short" style="display: inline;"> In line with recent advances in neural drug design and sensitivity prediction, we propose a novel architecture for interpretable prediction of anticancer compound sensitivity using a multimodal attention-based convolutional encoder. Our model is based on the three key pillars of drug sensitivity: compounds' structure in the form of a SMILES sequence, gene expression profiles of tumors and prior kn… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.11223v3-abstract-full').style.display = 'inline'; document.getElementById('1904.11223v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1904.11223v3-abstract-full" style="display: none;"> In line with recent advances in neural drug design and sensitivity prediction, we propose a novel architecture for interpretable prediction of anticancer compound sensitivity using a multimodal attention-based convolutional encoder. 
Our model is based on the three key pillars of drug sensitivity: compounds' structure in the form of a SMILES sequence, gene expression profiles of tumors and prior knowledge on intracellular interactions from protein-protein interaction networks. We demonstrate that our multiscale convolutional attention-based (MCA) encoder significantly outperforms a baseline model trained on Morgan fingerprints, a selection of encoders based on SMILES as well as previously reported state of the art for multimodal drug sensitivity prediction (R2 = 0.86 and RMSE = 0.89). Moreover, the explainability of our approach is demonstrated by a thorough analysis of the attention weights. We show that the attended genes significantly enrich apoptotic processes and that the drug attention is strongly correlated with a standard chemical structure similarity index. Finally, we report a case study of two receptor tyrosine kinase (RTK) inhibitors acting on a leukemia cell line, showcasing the ability of the model to focus on informative genes and submolecular regions of the two compounds. The demonstrated generalizability and the interpretability of our model testify its potential for in-silico prediction of anticancer compound efficacy on unseen cancer cells, positioning it as a valid solution for the development of personalized therapies as well as for the evaluation of candidate compounds in de novo drug design. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1904.11223v3-abstract-full').style.display = 'none'; document.getElementById('1904.11223v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2019. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages, 5 figures, 1 table, Workshop on Computational Biology at the International Conference on Machine Learning (ICML), Long Beach, CA, 2019</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Mol. Pharmaceutics 2019 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.06261">arXiv:1901.06261</a> <span> [<a href="https://arxiv.org/pdf/1901.06261">pdf</a>, <a href="https://arxiv.org/format/1901.06261">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> NeuNetS: An Automated Synthesis Engine for Neural Network Design </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sood%2C+A">Atin Sood</a>, <a href="/search/cs?searchtype=author&query=Elder%2C+B">Benjamin Elder</a>, <a href="/search/cs?searchtype=author&query=Herta%2C+B">Benjamin Herta</a>, <a href="/search/cs?searchtype=author&query=Xue%2C+C">Chao Xue</a>, <a href="/search/cs?searchtype=author&query=Bekas%2C+C">Costas Bekas</a>, <a href="/search/cs?searchtype=author&query=Malossi%2C+A+C+I">A. Cristiano I. 
Malossi</a>, <a href="/search/cs?searchtype=author&query=Saha%2C+D">Debashish Saha</a>, <a href="/search/cs?searchtype=author&query=Scheidegger%2C+F">Florian Scheidegger</a>, <a href="/search/cs?searchtype=author&query=Venkataraman%2C+G">Ganesh Venkataraman</a>, <a href="/search/cs?searchtype=author&query=Thomas%2C+G">Gegi Thomas</a>, <a href="/search/cs?searchtype=author&query=Mariani%2C+G">Giovanni Mariani</a>, <a href="/search/cs?searchtype=author&query=Strobelt%2C+H">Hendrik Strobelt</a>, <a href="/search/cs?searchtype=author&query=Samulowitz%2C+H">Horst Samulowitz</a>, <a href="/search/cs?searchtype=author&query=Wistuba%2C+M">Martin Wistuba</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Choudhury%2C+M">Mihir Choudhury</a>, <a href="/search/cs?searchtype=author&query=Yan%2C+R">Rong Yan</a>, <a href="/search/cs?searchtype=author&query=Istrate%2C+R">Roxana Istrate</a>, <a href="/search/cs?searchtype=author&query=Puri%2C+R">Ruchir Puri</a>, <a href="/search/cs?searchtype=author&query=Pedapati%2C+T">Tejaswini Pedapati</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1901.06261v1-abstract-short" style="display: inline;"> Application of neural networks to a vast variety of practical applications is transforming the way AI is applied in practice. Pre-trained neural network models available through APIs or capability to custom train pre-built neural network architectures with customer data has made the consumption of AI by developers much simpler and resulted in broad adoption of these complex AI models. 
While prebui… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.06261v1-abstract-full').style.display = 'inline'; document.getElementById('1901.06261v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1901.06261v1-abstract-full" style="display: none;"> Application of neural networks to a vast variety of practical applications is transforming the way AI is applied in practice. Pre-trained neural network models available through APIs or capability to custom train pre-built neural network architectures with customer data has made the consumption of AI by developers much simpler and resulted in broad adoption of these complex AI models. While prebuilt network models exist for certain scenarios, to try and meet the constraints that are unique to each application, AI teams need to think about developing custom neural network architectures that can meet the tradeoff between accuracy and memory footprint to achieve the tight constraints of their unique use-cases. However, only a small proportion of data science teams have the skills and experience needed to create a neural network from scratch, and the demand far exceeds the supply. In this paper, we present NeuNetS : An automated Neural Network Synthesis engine for custom neural network design that is available as part of IBM's AI OpenScale's product. NeuNetS is available for both Text and Image domains and can build neural networks for specific tasks in a fraction of the time it takes today with human effort, and with accuracy similar to that of human-designed AI models. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1901.06261v1-abstract-full').style.display = 'none'; document.getElementById('1901.06261v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 12 figures. arXiv admin note: text overlap with arXiv:1806.00250</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.06802">arXiv:1811.06802</a> <span> [<a href="https://arxiv.org/pdf/1811.06802">pdf</a>, <a href="https://arxiv.org/format/1811.06802">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Molecular Networks">q-bio.MN</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> </div> </div> <p class="title is-5 mathjax"> PaccMann: Prediction of anticancer compound sensitivity with multi-modal attention-based neural networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Oskooei%2C+A">Ali Oskooei</a>, <a href="/search/cs?searchtype=author&query=Born%2C+J">Jannis Born</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Subramanian%2C+V">Vigneshwari Subramanian</a>, <a href="/search/cs?searchtype=author&query=S%C3%A1ez-Rodr%C3%ADguez%2C+J">Julio Sáez-Rodríguez</a>, <a
href="/search/cs?searchtype=author&query=Mart%C3%ADnez%2C+M+R">María Rodríguez Martínez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1811.06802v2-abstract-short" style="display: inline;"> We present a novel approach for the prediction of anticancer compound sensitivity by means of multi-modal attention-based neural networks (PaccMann). In our approach, we integrate three key pillars of drug sensitivity, namely, the molecular structure of compounds, transcriptomic profiles of cancer cells as well as prior knowledge about interactions among proteins within cells. Our models ingest a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.06802v2-abstract-full').style.display = 'inline'; document.getElementById('1811.06802v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1811.06802v2-abstract-full" style="display: none;"> We present a novel approach for the prediction of anticancer compound sensitivity by means of multi-modal attention-based neural networks (PaccMann). In our approach, we integrate three key pillars of drug sensitivity, namely, the molecular structure of compounds, transcriptomic profiles of cancer cells as well as prior knowledge about interactions among proteins within cells. Our models ingest a drug-cell pair consisting of SMILES encoding of a compound and the gene expression profile of a cancer cell and predicts an IC50 sensitivity value. Gene expression profiles are encoded using an attention-based encoding mechanism that assigns high weights to the most informative genes. We present and study three encoders for SMILES string of compounds: 1) bidirectional recurrent 2) convolutional 3) attention-based encoders. 
We compare our devised models against a baseline model that ingests engineered fingerprints to represent the molecular structure. We demonstrate that using our attention-based encoders, we can surpass the baseline model. The use of attention-based encoders enhance interpretability and enable us to identify genes, bonds and atoms that were used by the network to make a prediction. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1811.06802v2-abstract-full').style.display = 'none'; document.getElementById('1811.06802v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 5 figures, 2 tables. 
NIPS MLMM 2018</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> NeurIPS 2018 Workshop on Machine Learning for Molecules & Materials </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1808.06603">arXiv:1808.06603</a> <span> [<a href="https://arxiv.org/pdf/1808.06603">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantitative Methods">q-bio.QM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Network-based Biased Tree Ensembles (NetBiTE) for Drug Sensitivity Prediction and Drug Sensitivity Biomarker Identification in Cancer </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Oskooei%2C+A">Ali Oskooei</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Mathis%2C+R">Roland Mathis</a>, <a href="/search/cs?searchtype=author&query=Martinez%2C+M+R">Maria Rodriguez Martinez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1808.06603v2-abstract-short" style="display: inline;"> We present the Network-based Biased Tree Ensembles (NetBiTE) method for drug sensitivity prediction and drug sensitivity biomarker identification in cancer using a combination of prior knowledge and gene expression data. Our devised method consists of a biased tree ensemble that is built according to a probabilistic bias weight distribution. 
The bias weight distribution is obtained from the assign… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.06603v2-abstract-full').style.display = 'inline'; document.getElementById('1808.06603v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1808.06603v2-abstract-full" style="display: none;"> We present the Network-based Biased Tree Ensembles (NetBiTE) method for drug sensitivity prediction and drug sensitivity biomarker identification in cancer using a combination of prior knowledge and gene expression data. Our devised method consists of a biased tree ensemble that is built according to a probabilistic bias weight distribution. The bias weight distribution is obtained from the assignment of high weights to the drug targets and propagating the assigned weights over a protein-protein interaction network such as STRING. The propagation of weights, defines neighborhoods of influence around the drug targets and as such simulates the spread of perturbations within the cell, following drug administration. Using a synthetic dataset, we showcase how application of biased tree ensembles (BiTE) results in significant accuracy gains at a much lower computational cost compared to the unbiased random forests (RF) algorithm. We then apply NetBiTE to the Genomics of Drug Sensitivity in Cancer (GDSC) dataset and demonstrate that NetBiTE outperforms RF in predicting IC50 drug sensitivity, only for drugs that target membrane receptor pathways (MRPs): RTK, EGFR and IGFR signaling pathways. We propose based on the NetBiTE results, that for drugs that inhibit MRPs, the expression of target genes prior to drug administration is a biomarker for IC50 drug sensitivity following drug administration. 
We further verify and reinforce this proposition through control studies on, PI3K/MTOR signaling pathway inhibitors, a drug category that does not target MRPs, and through assignment of dummy targets to MRP inhibiting drugs and investigating the variation in NetBiTE accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.06603v2-abstract-full').style.display = 'none'; document.getElementById('1808.06603v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 August, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">36 pages, 5 figures, 3 supplementary figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1701.04279">arXiv:1701.04279</a> <span> [<a href="https://arxiv.org/pdf/1701.04279">pdf</a>, <a href="https://arxiv.org/format/1701.04279">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1038/s41928-018-0054-8">10.1038/s41928-018-0054-8 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Mixed-Precision In-Memory Computing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&query=Gallo%2C+M+L">Manuel Le Gallo</a>, <a href="/search/cs?searchtype=author&query=Sebastian%2C+A">Abu Sebastian</a>, <a href="/search/cs?searchtype=author&query=Mathis%2C+R">Roland Mathis</a>, <a href="/search/cs?searchtype=author&query=Manica%2C+M">Matteo Manica</a>, <a href="/search/cs?searchtype=author&query=Giefers%2C+H">Heiner Giefers</a>, <a href="/search/cs?searchtype=author&query=Tuma%2C+T">Tomas Tuma</a>, <a href="/search/cs?searchtype=author&query=Bekas%2C+C">Costas Bekas</a>, <a href="/search/cs?searchtype=author&query=Curioni%2C+A">Alessandro Curioni</a>, <a href="/search/cs?searchtype=author&query=Eleftheriou%2C+E">Evangelos Eleftheriou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1701.04279v5-abstract-short" style="display: inline;"> As CMOS scaling reaches its technological limits, a radical departure from traditional von Neumann systems, which involve separate processing and memory units, is needed in order to significantly extend the performance of today's computers. In-memory computing is a promising approach in which nanoscale resistive memory devices, organized in a computational memory unit, are used for both processing… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1701.04279v5-abstract-full').style.display = 'inline'; document.getElementById('1701.04279v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1701.04279v5-abstract-full" style="display: none;"> As CMOS scaling reaches its technological limits, a radical departure from traditional von Neumann systems, which involve separate processing and memory units, is needed in order to significantly extend the performance of today's computers. 
In-memory computing is a promising approach in which nanoscale resistive memory devices, organized in a computational memory unit, are used for both processing and memory. However, to reach the numerical accuracy typically required for data analytics and scientific computing, limitations arising from device variability and non-ideal device characteristics need to be addressed. Here we introduce the concept of mixed-precision in-memory computing, which combines a von Neumann machine with a computational memory unit. In this hybrid system, the computational memory unit performs the bulk of a computational task, while the von Neumann machine implements a backward method to iteratively improve the accuracy of the solution. The system therefore benefits from both the high precision of digital computing and the energy/areal efficiency of in-memory computing. We experimentally demonstrate the efficacy of the approach by accurately solving systems of linear equations, in particular, a system of 5,000 equations using 998,752 phase-change memory devices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1701.04279v5-abstract-full').style.display = 'none'; document.getElementById('1701.04279v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 January, 2017; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2017. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Nature Electronics volume 1, pages 246-253 (2018) </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> 
Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 
47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>