CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;11 of 11 results for author: <span class="mathjax">Manyika, J</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Manyika%2C+J">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Manyika, J"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Manyika%2C+J&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Manyika, J"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.13741">arXiv:2409.13741</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.13741">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Knowing When to Ask -- Bridging Large Language Models and Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Radhakrishnan%2C+P">Prashanth Radhakrishnan</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jennifer Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Ramaswami%2C+P">Prem Ramaswami</a>, <a href="/search/cs?searchtype=author&amp;query=Pho%2C+H">Hannah Pho</a>, <a href="/search/cs?searchtype=author&amp;query=Olmos%2C+A">Adriana Olmos</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a>, <a href="/search/cs?searchtype=author&amp;query=Guha%2C+R+V">R. V. Guha</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.13741v1-abstract-short" style="display: inline;"> Large Language Models (LLMs) are prone to generating factually incorrect information when responding to queries that involve numerical and statistical data or other timely facts. In this paper, we present an approach for enhancing the accuracy of LLMs by integrating them with Data Commons, a vast, open-source repository of public statistics from trusted organizations like the United Nations (UN),&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13741v1-abstract-full').style.display = 'inline'; document.getElementById('2409.13741v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.13741v1-abstract-full" style="display: none;"> Large Language Models (LLMs) are prone to generating factually incorrect information when responding to queries that involve numerical and statistical data or other timely facts. In this paper, we present an approach for enhancing the accuracy of LLMs by integrating them with Data Commons, a vast, open-source repository of public statistics from trusted organizations like the United Nations (UN), Center for Disease Control and Prevention (CDC) and global census bureaus. We explore two primary methods: Retrieval Interleaved Generation (RIG), where the LLM is trained to produce natural language queries to retrieve data from Data Commons, and Retrieval Augmented Generation (RAG), where relevant data tables are fetched from Data Commons and used to augment the LLM&#39;s prompt. We evaluate these methods on a diverse set of queries, demonstrating their effectiveness in improving the factual accuracy of LLM outputs. Our work represents an early step towards building more trustworthy and reliable LLMs that are grounded in verifiable statistical data and capable of complex factual reasoning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.13741v1-abstract-full').style.display = 'none'; document.getElementById('2409.13741v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">39 pages - 25 page paper, 14 page Appendix, 7 figures, 9 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.02711">arXiv:2407.02711</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.02711">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> AI in Action: Accelerating Progress Towards the Sustainable Development Goals </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gosselink%2C+B+H">Brigitte Hoyer Gosselink</a>, <a href="/search/cs?searchtype=author&amp;query=Brandt%2C+K">Kate Brandt</a>, <a href="/search/cs?searchtype=author&amp;query=Croak%2C+M">Marian Croak</a>, <a href="/search/cs?searchtype=author&amp;query=DeSalvo%2C+K">Karen DeSalvo</a>, <a href="/search/cs?searchtype=author&amp;query=Gomes%2C+B">Ben Gomes</a>, <a href="/search/cs?searchtype=author&amp;query=Ibrahim%2C+L">Lila Ibrahim</a>, <a href="/search/cs?searchtype=author&amp;query=Johnson%2C+M">Maggie Johnson</a>, <a href="/search/cs?searchtype=author&amp;query=Matias%2C+Y">Yossi Matias</a>, <a href="/search/cs?searchtype=author&amp;query=Porat%2C+R">Ruth Porat</a>, <a href="/search/cs?searchtype=author&amp;query=Walker%2C+K">Kent Walker</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.02711v1-abstract-short" style="display: inline;"> Advances in Artificial Intelligence (AI) are helping tackle a growing number of societal challenges, demonstrating technology&#39;s increasing capability to address complex issues, including those outlined in the United Nations (UN) Sustainable Development Goals (SDGs). Despite global efforts, 80 percent of SDG targets have deviated, stalled, or regressed, and only 15 percent are on track as of 2023,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02711v1-abstract-full').style.display = 'inline'; document.getElementById('2407.02711v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.02711v1-abstract-full" style="display: none;"> Advances in Artificial Intelligence (AI) are helping tackle a growing number of societal challenges, demonstrating technology&#39;s increasing capability to address complex issues, including those outlined in the United Nations (UN) Sustainable Development Goals (SDGs). Despite global efforts, 80 percent of SDG targets have deviated, stalled, or regressed, and only 15 percent are on track as of 2023, illustrating the urgency of accelerating efforts to meet the goals by 2030. We draw on Google&#39;s internal and collaborative research, technical work, and social impact initiatives to show AI&#39;s potential to accelerate action on the SDGs and make substantive progress to help address humanity&#39;s most pressing challenges. The paper highlights AI capabilities (including computer vision, generative AI, natural language processing, and multimodal AI) and showcases how AI is altering how we approach problem-solving across all 17 SDGs through use cases, with a spotlight on AI-powered innovation in health, education, and climate. We then offer insights on AI development and deployment to drive bold and responsible innovation, enhance impact, close the accessibility gap, and ensure that everyone, everywhere, can benefit from AI. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.02711v1-abstract-full').style.display = 'none'; document.getElementById('2407.02711v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.19522">arXiv:2405.19522</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.19522">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Artificial Intelligence Index Report 2024 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maslej%2C+N">Nestor Maslej</a>, <a href="/search/cs?searchtype=author&amp;query=Fattorini%2C+L">Loredana Fattorini</a>, <a href="/search/cs?searchtype=author&amp;query=Perrault%2C+R">Raymond Perrault</a>, <a href="/search/cs?searchtype=author&amp;query=Parli%2C+V">Vanessa Parli</a>, <a href="/search/cs?searchtype=author&amp;query=Reuel%2C+A">Anka Reuel</a>, <a href="/search/cs?searchtype=author&amp;query=Brynjolfsson%2C+E">Erik Brynjolfsson</a>, <a href="/search/cs?searchtype=author&amp;query=Etchemendy%2C+J">John Etchemendy</a>, <a href="/search/cs?searchtype=author&amp;query=Ligett%2C+K">Katrina Ligett</a>, <a href="/search/cs?searchtype=author&amp;query=Lyons%2C+T">Terah Lyons</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a>, <a href="/search/cs?searchtype=author&amp;query=Niebles%2C+J+C">Juan Carlos Niebles</a>, <a href="/search/cs?searchtype=author&amp;query=Shoham%2C+Y">Yoav Shoham</a>, <a href="/search/cs?searchtype=author&amp;query=Wald%2C+R">Russell Wald</a>, <a href="/search/cs?searchtype=author&amp;query=Clark%2C+J">Jack Clark</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.19522v1-abstract-short" style="display: inline;"> The 2024 Index is our most comprehensive to date and arrives at an important moment when AI&#39;s influence on society has never been more pronounced. This year, we have broadened our scope to more extensively cover essential trends such as technical advancements in AI, public perceptions of the technology, and the geopolitical dynamics surrounding its development. Featuring more original data than ev&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.19522v1-abstract-full').style.display = 'inline'; document.getElementById('2405.19522v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.19522v1-abstract-full" style="display: none;"> The 2024 Index is our most comprehensive to date and arrives at an important moment when AI&#39;s influence on society has never been more pronounced. This year, we have broadened our scope to more extensively cover essential trends such as technical advancements in AI, public perceptions of the technology, and the geopolitical dynamics surrounding its development. Featuring more original data than ever before, this edition introduces new estimates on AI training costs, detailed analyses of the responsible AI landscape, and an entirely new chapter dedicated to AI&#39;s impact on science and medicine. The AI Index report tracks, collates, distills, and visualizes data related to artificial intelligence (AI). Our mission is to provide unbiased, rigorously vetted, broadly sourced data in order for policymakers, researchers, executives, journalists, and the general public to develop a more thorough and nuanced understanding of the complex field of AI. The AI Index is recognized globally as one of the most credible and authoritative sources for data and insights on artificial intelligence. Previous editions have been cited in major newspapers, including the The New York Times, Bloomberg, and The Guardian, have amassed hundreds of academic citations, and been referenced by high-level policymakers in the United States, the United Kingdom, and the European Union, among other places. This year&#39;s edition surpasses all previous ones in size, scale, and scope, reflecting the growing significance that AI is coming to hold in all of our lives. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.19522v1-abstract-full').style.display = 'none'; document.getElementById('2405.19522v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.18416">arXiv:2404.18416</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.18416">pdf</a>, <a href="https://arxiv.org/format/2404.18416">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Capabilities of Gemini Models in Medicine </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Saab%2C+K">Khaled Saab</a>, <a href="/search/cs?searchtype=author&amp;query=Tu%2C+T">Tao Tu</a>, <a href="/search/cs?searchtype=author&amp;query=Weng%2C+W">Wei-Hung Weng</a>, <a href="/search/cs?searchtype=author&amp;query=Tanno%2C+R">Ryutaro Tanno</a>, <a href="/search/cs?searchtype=author&amp;query=Stutz%2C+D">David Stutz</a>, <a href="/search/cs?searchtype=author&amp;query=Wulczyn%2C+E">Ellery Wulczyn</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+F">Fan Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Strother%2C+T">Tim Strother</a>, <a href="/search/cs?searchtype=author&amp;query=Park%2C+C">Chunjong Park</a>, <a href="/search/cs?searchtype=author&amp;query=Vedadi%2C+E">Elahe Vedadi</a>, <a href="/search/cs?searchtype=author&amp;query=Chaves%2C+J+Z">Juanma Zambrano Chaves</a>, <a href="/search/cs?searchtype=author&amp;query=Hu%2C+S">Szu-Yeu Hu</a>, <a href="/search/cs?searchtype=author&amp;query=Schaekermann%2C+M">Mike Schaekermann</a>, <a href="/search/cs?searchtype=author&amp;query=Kamath%2C+A">Aishwarya Kamath</a>, <a href="/search/cs?searchtype=author&amp;query=Cheng%2C+Y">Yong Cheng</a>, <a href="/search/cs?searchtype=author&amp;query=Barrett%2C+D+G+T">David G. T. Barrett</a>, <a href="/search/cs?searchtype=author&amp;query=Cheung%2C+C">Cathy Cheung</a>, <a href="/search/cs?searchtype=author&amp;query=Mustafa%2C+B">Basil Mustafa</a>, <a href="/search/cs?searchtype=author&amp;query=Palepu%2C+A">Anil Palepu</a>, <a href="/search/cs?searchtype=author&amp;query=McDuff%2C+D">Daniel McDuff</a>, <a href="/search/cs?searchtype=author&amp;query=Hou%2C+L">Le Hou</a>, <a href="/search/cs?searchtype=author&amp;query=Golany%2C+T">Tomer Golany</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+L">Luyang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Alayrac%2C+J">Jean-baptiste Alayrac</a>, <a href="/search/cs?searchtype=author&amp;query=Houlsby%2C+N">Neil Houlsby</a> , et al. (42 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.18416v2-abstract-short" style="display: inline;"> Excellence in a wide variety of medical applications poses considerable challenges for AI, requiring advanced reasoning, access to up-to-date medical knowledge and understanding of complex multimodal data. Gemini models, with strong general capabilities in multimodal and long-context reasoning, offer exciting possibilities in medicine. Building on these core strengths of Gemini, we introduce Med-G&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.18416v2-abstract-full').style.display = 'inline'; document.getElementById('2404.18416v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.18416v2-abstract-full" style="display: none;"> Excellence in a wide variety of medical applications poses considerable challenges for AI, requiring advanced reasoning, access to up-to-date medical knowledge and understanding of complex multimodal data. Gemini models, with strong general capabilities in multimodal and long-context reasoning, offer exciting possibilities in medicine. Building on these core strengths of Gemini, we introduce Med-Gemini, a family of highly capable multimodal models that are specialized in medicine with the ability to seamlessly use web search, and that can be efficiently tailored to novel modalities using custom encoders. We evaluate Med-Gemini on 14 medical benchmarks, establishing new state-of-the-art (SoTA) performance on 10 of them, and surpass the GPT-4 model family on every benchmark where a direct comparison is viable, often by a wide margin. On the popular MedQA (USMLE) benchmark, our best-performing Med-Gemini model achieves SoTA performance of 91.1% accuracy, using a novel uncertainty-guided search strategy. On 7 multimodal benchmarks including NEJM Image Challenges and MMMU (health &amp; medicine), Med-Gemini improves over GPT-4V by an average relative margin of 44.5%. We demonstrate the effectiveness of Med-Gemini&#39;s long-context capabilities through SoTA performance on a needle-in-a-haystack retrieval task from long de-identified health records and medical video question answering, surpassing prior bespoke methods using only in-context learning. Finally, Med-Gemini&#39;s performance suggests real-world utility by surpassing human experts on tasks such as medical text summarization, alongside demonstrations of promising potential for multimodal medical dialogue, medical research and education. Taken together, our results offer compelling evidence for Med-Gemini&#39;s potential, although further rigorous evaluation will be crucial before real-world deployment in this safety-critical domain. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.18416v2-abstract-full').style.display = 'none'; document.getElementById('2404.18416v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.16244">arXiv:2404.16244</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.16244">pdf</a>, <a href="https://arxiv.org/format/2404.16244">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> The Ethics of Advanced AI Assistants </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gabriel%2C+I">Iason Gabriel</a>, <a href="/search/cs?searchtype=author&amp;query=Manzini%2C+A">Arianna Manzini</a>, <a href="/search/cs?searchtype=author&amp;query=Keeling%2C+G">Geoff Keeling</a>, <a href="/search/cs?searchtype=author&amp;query=Hendricks%2C+L+A">Lisa Anne Hendricks</a>, <a href="/search/cs?searchtype=author&amp;query=Rieser%2C+V">Verena Rieser</a>, <a href="/search/cs?searchtype=author&amp;query=Iqbal%2C+H">Hasan Iqbal</a>, <a href="/search/cs?searchtype=author&amp;query=Toma%C5%A1ev%2C+N">Nenad Toma拧ev</a>, <a href="/search/cs?searchtype=author&amp;query=Ktena%2C+I">Ira Ktena</a>, <a href="/search/cs?searchtype=author&amp;query=Kenton%2C+Z">Zachary Kenton</a>, <a href="/search/cs?searchtype=author&amp;query=Rodriguez%2C+M">Mikel Rodriguez</a>, <a href="/search/cs?searchtype=author&amp;query=El-Sayed%2C+S">Seliem El-Sayed</a>, <a href="/search/cs?searchtype=author&amp;query=Brown%2C+S">Sasha Brown</a>, <a href="/search/cs?searchtype=author&amp;query=Akbulut%2C+C">Canfer Akbulut</a>, <a href="/search/cs?searchtype=author&amp;query=Trask%2C+A">Andrew Trask</a>, <a href="/search/cs?searchtype=author&amp;query=Hughes%2C+E">Edward Hughes</a>, <a href="/search/cs?searchtype=author&amp;query=Bergman%2C+A+S">A. Stevie Bergman</a>, <a href="/search/cs?searchtype=author&amp;query=Shelby%2C+R">Renee Shelby</a>, <a href="/search/cs?searchtype=author&amp;query=Marchal%2C+N">Nahema Marchal</a>, <a href="/search/cs?searchtype=author&amp;query=Griffin%2C+C">Conor Griffin</a>, <a href="/search/cs?searchtype=author&amp;query=Mateos-Garcia%2C+J">Juan Mateos-Garcia</a>, <a href="/search/cs?searchtype=author&amp;query=Weidinger%2C+L">Laura Weidinger</a>, <a href="/search/cs?searchtype=author&amp;query=Street%2C+W">Winnie Street</a>, <a href="/search/cs?searchtype=author&amp;query=Lange%2C+B">Benjamin Lange</a>, <a href="/search/cs?searchtype=author&amp;query=Ingerman%2C+A">Alex Ingerman</a>, <a href="/search/cs?searchtype=author&amp;query=Lentz%2C+A">Alison Lentz</a> , et al. (32 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.16244v2-abstract-short" style="display: inline;"> This paper focuses on the opportunities and the ethical and societal risks posed by advanced AI assistants. We define advanced AI assistants as artificial agents with natural language interfaces, whose function is to plan and execute sequences of actions on behalf of a user, across one or more domains, in line with the user&#39;s expectations. The paper starts by considering the technology itself, pro&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.16244v2-abstract-full').style.display = 'inline'; document.getElementById('2404.16244v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.16244v2-abstract-full" style="display: none;"> This paper focuses on the opportunities and the ethical and societal risks posed by advanced AI assistants. We define advanced AI assistants as artificial agents with natural language interfaces, whose function is to plan and execute sequences of actions on behalf of a user, across one or more domains, in line with the user&#39;s expectations. The paper starts by considering the technology itself, providing an overview of AI assistants, their technical foundations and potential range of applications. It then explores questions around AI value alignment, well-being, safety and malicious uses. Extending the circle of inquiry further, we next consider the relationship between advanced AI assistants and individual users in more detail, exploring topics such as manipulation and persuasion, anthropomorphism, appropriate relationships, trust and privacy. With this analysis in place, we consider the deployment of advanced assistants at a societal scale, focusing on cooperation, equity and access, misinformation, economic impact, the environment and how best to evaluate advanced AI assistants. Finally, we conclude by providing a range of recommendations for researchers, developers, policymakers and public stakeholders. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.16244v2-abstract-full').style.display = 'none'; document.getElementById('2404.16244v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.05530">arXiv:2403.05530</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.05530">pdf</a>, <a href="https://arxiv.org/format/2403.05530">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gemini+Team"> Gemini Team</a>, <a href="/search/cs?searchtype=author&amp;query=Georgiev%2C+P">Petko Georgiev</a>, <a href="/search/cs?searchtype=author&amp;query=Lei%2C+V+I">Ving Ian Lei</a>, <a href="/search/cs?searchtype=author&amp;query=Burnell%2C+R">Ryan Burnell</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+L">Libin Bai</a>, <a href="/search/cs?searchtype=author&amp;query=Gulati%2C+A">Anmol Gulati</a>, <a href="/search/cs?searchtype=author&amp;query=Tanzer%2C+G">Garrett Tanzer</a>, <a href="/search/cs?searchtype=author&amp;query=Vincent%2C+D">Damien Vincent</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+Z">Zhufeng Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+S">Shibo Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Mariooryad%2C+S">Soroosh Mariooryad</a>, <a href="/search/cs?searchtype=author&amp;query=Ding%2C+Y">Yifan Ding</a>, <a href="/search/cs?searchtype=author&amp;query=Geng%2C+X">Xinyang Geng</a>, <a href="/search/cs?searchtype=author&amp;query=Alcober%2C+F">Fred Alcober</a>, <a href="/search/cs?searchtype=author&amp;query=Frostig%2C+R">Roy Frostig</a>, <a href="/search/cs?searchtype=author&amp;query=Omernick%2C+M">Mark Omernick</a>, <a href="/search/cs?searchtype=author&amp;query=Walker%2C+L">Lexi Walker</a>, <a href="/search/cs?searchtype=author&amp;query=Paduraru%2C+C">Cosmin Paduraru</a>, <a href="/search/cs?searchtype=author&amp;query=Sorokin%2C+C">Christina Sorokin</a>, <a href="/search/cs?searchtype=author&amp;query=Tacchetti%2C+A">Andrea Tacchetti</a>, <a href="/search/cs?searchtype=author&amp;query=Gaffney%2C+C">Colin Gaffney</a>, <a href="/search/cs?searchtype=author&amp;query=Daruki%2C+S">Samira Daruki</a>, <a href="/search/cs?searchtype=author&amp;query=Sercinoglu%2C+O">Olcan Sercinoglu</a>, <a href="/search/cs?searchtype=author&amp;query=Gleicher%2C+Z">Zach Gleicher</a>, <a href="/search/cs?searchtype=author&amp;query=Love%2C+J">Juliette Love</a> , et al. (1112 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.05530v5-abstract-short" style="display: inline;"> In this report, we introduce the Gemini 1.5 family of models, representing the next generation of highly compute-efficient multimodal models capable of recalling and reasoning over fine-grained information from millions of tokens of context, including multiple long documents and hours of video and audio. The family includes two new models: (1) an updated Gemini 1.5 Pro, which exceeds the February&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.05530v5-abstract-full').style.display = 'inline'; document.getElementById('2403.05530v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.05530v5-abstract-full" style="display: none;"> In this report, we introduce the Gemini 1.5 family of models, representing the next generation of highly compute-efficient multimodal models capable of recalling and reasoning over fine-grained information from millions of tokens of context, including multiple long documents and hours of video and audio. The family includes two new models: (1) an updated Gemini 1.5 Pro, which exceeds the February version on the great majority of capabilities and benchmarks; (2) Gemini 1.5 Flash, a more lightweight variant designed for efficiency with minimal regression in quality. Gemini 1.5 models achieve near-perfect recall on long-context retrieval tasks across modalities, improve the state-of-the-art in long-document QA, long-video QA and long-context ASR, and match or surpass Gemini 1.0 Ultra&#39;s state-of-the-art performance across a broad set of benchmarks. Studying the limits of Gemini 1.5&#39;s long-context ability, we find continued improvement in next-token prediction and near-perfect retrieval (&gt;99%) up to at least 10M tokens, a generational leap over existing models such as Claude 3.0 (200k) and GPT-4 Turbo (128k). Finally, we highlight real-world use cases, such as Gemini 1.5 collaborating with professionals on completing their tasks achieving 26 to 75% time savings across 10 different job categories, as well as surprising new capabilities of large language models at the frontier; when given a grammar manual for Kalamang, a language with fewer than 200 speakers worldwide, the model learns to translate English to Kalamang at a similar level to a person who learned from the same content. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.05530v5-abstract-full').style.display = 'none'; document.getElementById('2403.05530v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.11805">arXiv:2312.11805</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.11805">pdf</a>, <a href="https://arxiv.org/format/2312.11805">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Gemini: A Family of Highly Capable Multimodal Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gemini+Team"> Gemini Team</a>, <a href="/search/cs?searchtype=author&amp;query=Anil%2C+R">Rohan Anil</a>, <a href="/search/cs?searchtype=author&amp;query=Borgeaud%2C+S">Sebastian Borgeaud</a>, <a href="/search/cs?searchtype=author&amp;query=Alayrac%2C+J">Jean-Baptiste Alayrac</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+J">Jiahui Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Soricut%2C+R">Radu Soricut</a>, <a href="/search/cs?searchtype=author&amp;query=Schalkwyk%2C+J">Johan Schalkwyk</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+A+M">Andrew M. Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Hauth%2C+A">Anja Hauth</a>, <a href="/search/cs?searchtype=author&amp;query=Millican%2C+K">Katie Millican</a>, <a href="/search/cs?searchtype=author&amp;query=Silver%2C+D">David Silver</a>, <a href="/search/cs?searchtype=author&amp;query=Johnson%2C+M">Melvin Johnson</a>, <a href="/search/cs?searchtype=author&amp;query=Antonoglou%2C+I">Ioannis Antonoglou</a>, <a href="/search/cs?searchtype=author&amp;query=Schrittwieser%2C+J">Julian Schrittwieser</a>, <a href="/search/cs?searchtype=author&amp;query=Glaese%2C+A">Amelia Glaese</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jilin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Pitler%2C+E">Emily Pitler</a>, <a href="/search/cs?searchtype=author&amp;query=Lillicrap%2C+T">Timothy Lillicrap</a>, <a href="/search/cs?searchtype=author&amp;query=Lazaridou%2C+A">Angeliki Lazaridou</a>, <a href="/search/cs?searchtype=author&amp;query=Firat%2C+O">Orhan Firat</a>, <a href="/search/cs?searchtype=author&amp;query=Molloy%2C+J">James Molloy</a>, <a href="/search/cs?searchtype=author&amp;query=Isard%2C+M">Michael Isard</a>, <a href="/search/cs?searchtype=author&amp;query=Barham%2C+P+R">Paul R. Barham</a>, <a href="/search/cs?searchtype=author&amp;query=Hennigan%2C+T">Tom Hennigan</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+B">Benjamin Lee</a> , et al. (1325 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.11805v4-abstract-short" style="display: inline;"> This report introduces a new family of multimodal models, Gemini, that exhibit remarkable capabilities across image, audio, video, and text understanding. The Gemini family consists of Ultra, Pro, and Nano sizes, suitable for applications ranging from complex reasoning tasks to on-device memory-constrained use-cases. Evaluation on a broad range of benchmarks shows that our most-capable Gemini Ultr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.11805v4-abstract-full').style.display = 'inline'; document.getElementById('2312.11805v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.11805v4-abstract-full" style="display: none;"> This report introduces a new family of multimodal models, Gemini, that exhibit remarkable capabilities across image, audio, video, and text understanding. The Gemini family consists of Ultra, Pro, and Nano sizes, suitable for applications ranging from complex reasoning tasks to on-device memory-constrained use-cases. Evaluation on a broad range of benchmarks shows that our most-capable Gemini Ultra model advances the state of the art in 30 of 32 of these benchmarks - notably being the first model to achieve human-expert performance on the well-studied exam benchmark MMLU, and improving the state of the art in every one of the 20 multimodal benchmarks we examined. We believe that the new capabilities of the Gemini family in cross-modal reasoning and language understanding will enable a wide variety of use cases. We discuss our approach toward post-training and deploying Gemini models responsibly to users through services including Gemini, Gemini Advanced, Google AI Studio, and Cloud Vertex AI. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.11805v4-abstract-full').style.display = 'none'; document.getElementById('2312.11805v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.03715">arXiv:2310.03715</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.03715">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Artificial Intelligence Index Report 2023 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maslej%2C+N">Nestor Maslej</a>, <a href="/search/cs?searchtype=author&amp;query=Fattorini%2C+L">Loredana Fattorini</a>, <a href="/search/cs?searchtype=author&amp;query=Brynjolfsson%2C+E">Erik Brynjolfsson</a>, <a href="/search/cs?searchtype=author&amp;query=Etchemendy%2C+J">John Etchemendy</a>, <a href="/search/cs?searchtype=author&amp;query=Ligett%2C+K">Katrina Ligett</a>, <a href="/search/cs?searchtype=author&amp;query=Lyons%2C+T">Terah Lyons</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+H">Helen Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Niebles%2C+J+C">Juan Carlos Niebles</a>, <a href="/search/cs?searchtype=author&amp;query=Parli%2C+V">Vanessa Parli</a>, <a href="/search/cs?searchtype=author&amp;query=Shoham%2C+Y">Yoav Shoham</a>, <a href="/search/cs?searchtype=author&amp;query=Wald%2C+R">Russell Wald</a>, <a href="/search/cs?searchtype=author&amp;query=Clark%2C+J">Jack Clark</a>, <a href="/search/cs?searchtype=author&amp;query=Perrault%2C+R">Raymond Perrault</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.03715v1-abstract-short" style="display: inline;"> Welcome to the sixth edition of the AI Index Report. This year, the report introduces more original data than any previous edition, including a new chapter on AI public opinion, a more thorough technical performance chapter, original analysis about large language and multimodal models, detailed trends in global AI legislation records, a study of the environmental impact of AI systems, and more. Th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.03715v1-abstract-full').style.display = 'inline'; document.getElementById('2310.03715v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.03715v1-abstract-full" style="display: none;"> Welcome to the sixth edition of the AI Index Report. This year, the report introduces more original data than any previous edition, including a new chapter on AI public opinion, a more thorough technical performance chapter, original analysis about large language and multimodal models, detailed trends in global AI legislation records, a study of the environmental impact of AI systems, and more. The AI Index Report tracks, collates, distills, and visualizes data related to artificial intelligence. Our mission is to provide unbiased, rigorously vetted, broadly sourced data in order for policymakers, researchers, executives, journalists, and the general public to develop a more thorough and nuanced understanding of the complex field of AI. The report aims to be the world&#39;s most credible and authoritative source for data and insights about AI. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.03715v1-abstract-full').style.display = 'none'; document.getElementById('2310.03715v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.13054">arXiv:2309.13054</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.13054">pdf</a>, <a href="https://arxiv.org/format/2309.13054">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Data Commons </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Guha%2C+R+V">Ramanathan V. Guha</a>, <a href="/search/cs?searchtype=author&amp;query=Radhakrishnan%2C+P">Prashanth Radhakrishnan</a>, <a href="/search/cs?searchtype=author&amp;query=Xu%2C+B">Bo Xu</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+W">Wei Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Au%2C+C">Carolyn Au</a>, <a href="/search/cs?searchtype=author&amp;query=Tirumali%2C+A">Ajai Tirumali</a>, <a href="/search/cs?searchtype=author&amp;query=Amjad%2C+M+J">Muhammad J. Amjad</a>, <a href="/search/cs?searchtype=author&amp;query=Piekos%2C+S">Samantha Piekos</a>, <a href="/search/cs?searchtype=author&amp;query=Diaz%2C+N">Natalie Diaz</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jennifer Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+J">Julia Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Ramaswami%2C+P">Prem Ramaswami</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.13054v1-abstract-short" style="display: inline;"> Publicly available data from open sources (e.g., United States Census Bureau (Census), World Health Organization (WHO), Intergovernmental Panel on Climate Change (IPCC)) are vital resources for policy makers, students and researchers across different disciplines. Combining data from different sources requires the user to reconcile the differences in schemas, formats, assumptions, and more. This da&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.13054v1-abstract-full').style.display = 'inline'; document.getElementById('2309.13054v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.13054v1-abstract-full" style="display: none;"> Publicly available data from open sources (e.g., United States Census Bureau (Census), World Health Organization (WHO), Intergovernmental Panel on Climate Change (IPCC)) are vital resources for policy makers, students and researchers across different disciplines. Combining data from different sources requires the user to reconcile the differences in schemas, formats, assumptions, and more. This data wrangling is time consuming, tedious and needs to be repeated by every user of the data. Our goal with Data Commons (DC) is to help make public data accessible and useful to those who want to understand this data and use it to solve societal challenges and opportunities. We do the data processing and make the processed data widely available via standard schemas and Cloud APIs. Data Commons is a distributed network of sites that publish data in a common schema and interoperate using the Data Commons APIs. Data from different Data Commons can be joined easily. The aggregate of these Data Commons can be viewed as a single Knowledge Graph. This Knowledge Graph can then be searched over using Natural Language questions utilizing advances in Large Language Models. This paper describes the architecture of Data Commons, some of the major deployments and highlights directions for future work. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.13054v1-abstract-full').style.display = 'none'; document.getElementById('2309.13054v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.03468">arXiv:2205.03468</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.03468">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> The AI Index 2022 Annual Report </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Daniel Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Maslej%2C+N">Nestor Maslej</a>, <a href="/search/cs?searchtype=author&amp;query=Brynjolfsson%2C+E">Erik Brynjolfsson</a>, <a href="/search/cs?searchtype=author&amp;query=Etchemendy%2C+J">John Etchemendy</a>, <a href="/search/cs?searchtype=author&amp;query=Lyons%2C+T">Terah Lyons</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a>, <a href="/search/cs?searchtype=author&amp;query=Ngo%2C+H">Helen Ngo</a>, <a href="/search/cs?searchtype=author&amp;query=Niebles%2C+J+C">Juan Carlos Niebles</a>, <a href="/search/cs?searchtype=author&amp;query=Sellitto%2C+M">Michael Sellitto</a>, <a href="/search/cs?searchtype=author&amp;query=Sakhaee%2C+E">Ellie Sakhaee</a>, <a href="/search/cs?searchtype=author&amp;query=Shoham%2C+Y">Yoav Shoham</a>, <a href="/search/cs?searchtype=author&amp;query=Clark%2C+J">Jack Clark</a>, <a href="/search/cs?searchtype=author&amp;query=Perrault%2C+R">Raymond Perrault</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.03468v1-abstract-short" style="display: inline;"> Welcome to the fifth edition of the AI Index Report! The latest edition includes data from a broad set of academic, private, and nonprofit organizations as well as more self-collected data and original analysis than any previous editions, including an expanded technical performance chapter, a new survey of robotics researchers around the world, data on global AI legislation records in 25 countries&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.03468v1-abstract-full').style.display = 'inline'; document.getElementById('2205.03468v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.03468v1-abstract-full" style="display: none;"> Welcome to the fifth edition of the AI Index Report! The latest edition includes data from a broad set of academic, private, and nonprofit organizations as well as more self-collected data and original analysis than any previous editions, including an expanded technical performance chapter, a new survey of robotics researchers around the world, data on global AI legislation records in 25 countries, and a new chapter with an in-depth analysis of technical AI ethics metrics. The AI Index Report tracks, collates, distills, and visualizes data related to artificial intelligence. Its mission is to provide unbiased, rigorously vetted, and globally sourced data for policymakers, researchers, executives, journalists, and the general public to develop a more thorough and nuanced understanding of the complex field of AI. The report aims to be the world&#39;s most credible and authoritative source for data and insights about AI. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.03468v1-abstract-full').style.display = 'none'; document.getElementById('2205.03468v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.06312">arXiv:2103.06312</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2103.06312">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="General Literature">cs.GL</span> </div> </div> <p class="title is-5 mathjax"> The AI Index 2021 Annual Report </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+D">Daniel Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Mishra%2C+S">Saurabh Mishra</a>, <a href="/search/cs?searchtype=author&amp;query=Brynjolfsson%2C+E">Erik Brynjolfsson</a>, <a href="/search/cs?searchtype=author&amp;query=Etchemendy%2C+J">John Etchemendy</a>, <a href="/search/cs?searchtype=author&amp;query=Ganguli%2C+D">Deep Ganguli</a>, <a href="/search/cs?searchtype=author&amp;query=Grosz%2C+B">Barbara Grosz</a>, <a href="/search/cs?searchtype=author&amp;query=Lyons%2C+T">Terah Lyons</a>, <a href="/search/cs?searchtype=author&amp;query=Manyika%2C+J">James Manyika</a>, <a href="/search/cs?searchtype=author&amp;query=Niebles%2C+J+C">Juan Carlos Niebles</a>, <a href="/search/cs?searchtype=author&amp;query=Sellitto%2C+M">Michael Sellitto</a>, <a href="/search/cs?searchtype=author&amp;query=Shoham%2C+Y">Yoav Shoham</a>, <a href="/search/cs?searchtype=author&amp;query=Clark%2C+J">Jack Clark</a>, <a href="/search/cs?searchtype=author&amp;query=Perrault%2C+R">Raymond Perrault</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2103.06312v1-abstract-short" style="display: inline;"> Welcome to the fourth edition of the AI Index Report. This year we significantly expanded the amount of data available in the report, worked with a broader set of external organizations to calibrate our data, and deepened our connections with the Stanford Institute for Human-Centered Artificial Intelligence (HAI). The AI Index Report tracks, collates, distills, and visualizes data related to artif&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.06312v1-abstract-full').style.display = 'inline'; document.getElementById('2103.06312v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.06312v1-abstract-full" style="display: none;"> Welcome to the fourth edition of the AI Index Report. This year we significantly expanded the amount of data available in the report, worked with a broader set of external organizations to calibrate our data, and deepened our connections with the Stanford Institute for Human-Centered Artificial Intelligence (HAI). The AI Index Report tracks, collates, distills, and visualizes data related to artificial intelligence. Its mission is to provide unbiased, rigorously vetted, and globally sourced data for policymakers, researchers, executives, journalists, and the general public to develop intuitions about the complex field of AI. The report aims to be the most credible and authoritative source for data and insights about AI in the world. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.06312v1-abstract-full').style.display = 'none'; document.getElementById('2103.06312v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10