Search | arXiv e-print repository
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–10 of 10 results for author: <span class="mathjax">Siddarth, D</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Siddarth%2C+D">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Siddarth, D"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Siddarth%2C+D&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Siddarth, D"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.06729">arXiv:2409.06729</a> <span> [<a href="https://arxiv.org/pdf/2409.06729">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> How will advanced AI systems impact democracy? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Summerfield%2C+C">Christopher Summerfield</a>, <a href="/search/cs?searchtype=author&query=Argyle%2C+L">Lisa Argyle</a>, <a href="/search/cs?searchtype=author&query=Bakker%2C+M">Michiel Bakker</a>, <a href="/search/cs?searchtype=author&query=Collins%2C+T">Teddy Collins</a>, <a href="/search/cs?searchtype=author&query=Durmus%2C+E">Esin Durmus</a>, <a href="/search/cs?searchtype=author&query=Eloundou%2C+T">Tyna Eloundou</a>, <a href="/search/cs?searchtype=author&query=Gabriel%2C+I">Iason Gabriel</a>, <a href="/search/cs?searchtype=author&query=Ganguli%2C+D">Deep Ganguli</a>, <a href="/search/cs?searchtype=author&query=Hackenburg%2C+K">Kobi Hackenburg</a>, <a href="/search/cs?searchtype=author&query=Hadfield%2C+G">Gillian Hadfield</a>, <a href="/search/cs?searchtype=author&query=Hewitt%2C+L">Luke Hewitt</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+S">Saffron Huang</a>, <a href="/search/cs?searchtype=author&query=Landemore%2C+H">Helene Landemore</a>, <a href="/search/cs?searchtype=author&query=Marchal%2C+N">Nahema Marchal</a>, <a href="/search/cs?searchtype=author&query=Ovadya%2C+A">Aviv Ovadya</a>, <a href="/search/cs?searchtype=author&query=Procaccia%2C+A">Ariel Procaccia</a>, <a href="/search/cs?searchtype=author&query=Risse%2C+M">Mathias Risse</a>, <a href="/search/cs?searchtype=author&query=Schneier%2C+B">Bruce Schneier</a>, <a href="/search/cs?searchtype=author&query=Seger%2C+E">Elizabeth Seger</a>, <a href="/search/cs?searchtype=author&query=Siddarth%2C+D">Divya Siddarth</a>, <a href="/search/cs?searchtype=author&query=S%C3%A6tra%2C+H+S">Henrik Skaug S忙tra</a>, <a href="/search/cs?searchtype=author&query=Tessler%2C+M">MH Tessler</a>, <a href="/search/cs?searchtype=author&query=Botvinick%2C+M">Matthew Botvinick</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.06729v1-abstract-short" style="display: inline;"> Advanced AI systems capable of generating humanlike text and multimodal content are now widely available. In this paper, we discuss the impacts that generative artificial intelligence may have on democratic processes. 
We consider the consequences of AI for citizens' ability to make informed choices about political representatives and issues (epistemic impacts). We ask how AI might be used to desta… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06729v1-abstract-full').style.display = 'inline'; document.getElementById('2409.06729v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.06729v1-abstract-full" style="display: none;"> Advanced AI systems capable of generating humanlike text and multimodal content are now widely available. In this paper, we discuss the impacts that generative artificial intelligence may have on democratic processes. We consider the consequences of AI for citizens' ability to make informed choices about political representatives and issues (epistemic impacts). We ask how AI might be used to destabilise or support democratic mechanisms like elections (material impacts). Finally, we discuss whether AI will strengthen or weaken democratic principles (foundational impacts). It is widely acknowledged that new AI systems could pose significant challenges for democracy. However, it has also been argued that generative AI offers new opportunities to educate and learn from citizens, strengthen public discourse, help people find common ground, and to reimagine how democracies might work better. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.06729v1-abstract-full').style.display = 'none'; document.getElementById('2409.06729v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.07892">arXiv:2408.07892</a> <span> [<a href="https://arxiv.org/pdf/2408.07892">pdf</a>, <a href="https://arxiv.org/format/2408.07892">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Personhood credentials: Artificial intelligence and the value of privacy-preserving tools to distinguish who is real online </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Adler%2C+S">Steven Adler</a>, <a href="/search/cs?searchtype=author&query=Hitzig%2C+Z">Zo毛 Hitzig</a>, <a href="/search/cs?searchtype=author&query=Jain%2C+S">Shrey Jain</a>, <a href="/search/cs?searchtype=author&query=Brewer%2C+C">Catherine Brewer</a>, <a href="/search/cs?searchtype=author&query=Chang%2C+W">Wayne Chang</a>, <a href="/search/cs?searchtype=author&query=DiResta%2C+R">Ren茅e DiResta</a>, <a href="/search/cs?searchtype=author&query=Lazzarin%2C+E">Eddy Lazzarin</a>, <a href="/search/cs?searchtype=author&query=McGregor%2C+S">Sean McGregor</a>, <a href="/search/cs?searchtype=author&query=Seltzer%2C+W">Wendy Seltzer</a>, <a href="/search/cs?searchtype=author&query=Siddarth%2C+D">Divya Siddarth</a>, <a href="/search/cs?searchtype=author&query=Soliman%2C+N">Nouran Soliman</a>, <a href="/search/cs?searchtype=author&query=South%2C+T">Tobin South</a>, <a href="/search/cs?searchtype=author&query=Spelliscy%2C+C">Connor Spelliscy</a>, <a href="/search/cs?searchtype=author&query=Sporny%2C+M">Manu Sporny</a>, <a href="/search/cs?searchtype=author&query=Srivastava%2C+V">Varya Srivastava</a>, <a href="/search/cs?searchtype=author&query=Bailey%2C+J">John Bailey</a>, <a href="/search/cs?searchtype=author&query=Christian%2C+B">Brian Christian</a>, <a href="/search/cs?searchtype=author&query=Critch%2C+A">Andrew Critch</a>, <a href="/search/cs?searchtype=author&query=Falcon%2C+R">Ronnie Falcon</a>, <a href="/search/cs?searchtype=author&query=Flanagan%2C+H">Heather Flanagan</a>, <a href="/search/cs?searchtype=author&query=Duffy%2C+K+H">Kim Hamilton Duffy</a>, <a href="/search/cs?searchtype=author&query=Ho%2C+E">Eric Ho</a>, <a href="/search/cs?searchtype=author&query=Leibowicz%2C+C+R">Claire R. Leibowicz</a>, <a href="/search/cs?searchtype=author&query=Nadhamuni%2C+S">Srikanth Nadhamuni</a>, <a href="/search/cs?searchtype=author&query=Rozenshtein%2C+A+Z">Alan Z. Rozenshtein</a> , et al. (7 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.07892v3-abstract-short" style="display: inline;"> Anonymity is an important principle online. However, malicious actors have long used misleading identities to conduct fraud, spread disinformation, and carry out other deceptive schemes. With the advent of increasingly capable AI, bad actors can amplify the potential scale and effectiveness of their operations, intensifying the challenge of balancing anonymity and trustworthiness online. 
In this p… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07892v3-abstract-full').style.display = 'inline'; document.getElementById('2408.07892v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.07892v3-abstract-full" style="display: none;"> Anonymity is an important principle online. However, malicious actors have long used misleading identities to conduct fraud, spread disinformation, and carry out other deceptive schemes. With the advent of increasingly capable AI, bad actors can amplify the potential scale and effectiveness of their operations, intensifying the challenge of balancing anonymity and trustworthiness online. In this paper, we analyze the value of a new tool to address this challenge: "personhood credentials" (PHCs), digital credentials that empower users to demonstrate that they are real people -- not AIs -- to online services, without disclosing any personal information. Such credentials can be issued by a range of trusted institutions -- governments or otherwise. A PHC system, according to our definition, could be local or global, and does not need to be biometrics-based. Two trends in AI contribute to the urgency of the challenge: AI's increasing indistinguishability from people online (i.e., lifelike content and avatars, agentic activity), and AI's increasing scalability (i.e., cost-effectiveness, accessibility). Drawing on a long history of research into anonymous credentials and "proof-of-personhood" systems, personhood credentials give people a way to signal their trustworthiness on online platforms, and offer service providers new tools for reducing misuse by bad actors. In contrast, existing countermeasures to automated deception -- such as CAPTCHAs -- are inadequate against sophisticated AI, while stringent identity verification solutions are insufficiently private for many use-cases. After surveying the benefits of personhood credentials, we also examine deployment risks and design challenges. We conclude with actionable next steps for policymakers, technologists, and standards bodies to consider in consultation with the public. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.07892v3-abstract-full').style.display = 'none'; document.getElementById('2408.07892v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">63 pages, 7 figures, 5 tables; minor additions to acknowledgments and wording changes for clarity; corrected typo</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.07814">arXiv:2406.07814</a> <span> [<a href="https://arxiv.org/pdf/2406.07814">pdf</a>, <a href="https://arxiv.org/format/2406.07814">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3630106.3658979">10.1145/3630106.3658979 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Collective Constitutional AI: Aligning a Language Model with Public Input </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+S">Saffron Huang</a>, <a href="/search/cs?searchtype=author&query=Siddarth%2C+D">Divya Siddarth</a>, <a href="/search/cs?searchtype=author&query=Lovitt%2C+L">Liane Lovitt</a>, <a href="/search/cs?searchtype=author&query=Liao%2C+T+I">Thomas I. Liao</a>, <a href="/search/cs?searchtype=author&query=Durmus%2C+E">Esin Durmus</a>, <a href="/search/cs?searchtype=author&query=Tamkin%2C+A">Alex Tamkin</a>, <a href="/search/cs?searchtype=author&query=Ganguli%2C+D">Deep Ganguli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.07814v1-abstract-short" style="display: inline;"> There is growing consensus that language model (LM) developers should not be the sole deciders of LM behavior, creating a need for methods that enable the broader public to collectively shape the behavior of LM systems that affect them. To address this need, we present Collective Constitutional AI (CCAI): a multi-stage process for sourcing and integrating public input into LMs-from identifying a t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.07814v1-abstract-full').style.display = 'inline'; document.getElementById('2406.07814v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.07814v1-abstract-full" style="display: none;"> There is growing consensus that language model (LM) developers should not be the sole deciders of LM behavior, creating a need for methods that enable the broader public to collectively shape the behavior of LM systems that affect them. To address this need, we present Collective Constitutional AI (CCAI): a multi-stage process for sourcing and integrating public input into LMs-from identifying a target population to sourcing principles to training and evaluating a model. 
We demonstrate the real-world practicality of this approach by creating what is, to our knowledge, the first LM fine-tuned with collectively sourced public input and evaluating this model against a baseline model trained with established principles from an LM developer. Our quantitative evaluations demonstrate several benefits of our approach: the CCAI-trained model shows lower bias across nine social dimensions compared to the baseline model, while maintaining equivalent performance on language, math, and helpful-harmless evaluations. Qualitative comparisons of the models suggest that the models differ on the basis of their respective constitutions, e.g., when prompted with contentious topics, the CCAI-trained model tends to generate responses that reframe the matter positively instead of a refusal. These results demonstrate a promising, tractable pathway toward publicly informed development of language models.
Submitted 11 June, 2024; originally announced June 2024.
ACM Class: I.2.7; K.4.2
Journal ref: Proceedings of the 2024 ACM Conference on Fairness, Accountability, and Transparency. 1395-1417

4. arXiv:2307.03718 [pdf, other] - cs.CY (Computers and Society); cs.AI (Artificial Intelligence)
Title: Frontier AI Regulation: Managing Emerging Risks to Public Safety
Authors: Markus Anderljung, Joslyn Barnhart, Anton Korinek, Jade Leung, Cullen O'Keefe, Jess Whittlestone, Shahar Avin, Miles Brundage, Justin Bullock, Duncan Cass-Beggs, Ben Chang, Tantum Collins, Tim Fist, Gillian Hadfield, Alan Hayes, Lewis Ho, Sara Hooker, Eric Horvitz, Noam Kolt, Jonas Schuett, Yonadav Shavit, Divya Siddarth, Robert Trager, Kevin Wolf
Abstract: Advanced AI models hold the promise of tremendous benefits for humanity, but society needs to proactively manage the accompanying risks. In this paper, we focus on what we term "frontier AI" models: highly capable foundation models that could possess dangerous capabilities sufficient to pose severe risks to public safety. Frontier AI models pose a distinct regulatory challenge: dangerous capabilities can arise unexpectedly; it is difficult to robustly prevent a deployed model from being misused; and, it is difficult to stop a model's capabilities from proliferating broadly. To address these challenges, at least three building blocks for the regulation of frontier models are needed: (1) standard-setting processes to identify appropriate requirements for frontier AI developers, (2) registration and reporting requirements to provide regulators with visibility into frontier AI development processes, and (3) mechanisms to ensure compliance with safety standards for the development and deployment of frontier AI models. Industry self-regulation is an important first step. However, wider societal discussions and government intervention will be needed to create standards and to ensure compliance with them. We consider several options to this end, including granting enforcement powers to supervisory authorities and licensure regimes for frontier AI models. Finally, we propose an initial set of safety standards. These include conducting pre-deployment risk assessments; external scrutiny of model behavior; using risk assessments to inform deployment decisions; and monitoring and responding to new information about model capabilities and uses post-deployment. We hope this discussion contributes to the broader conversation on how to balance public safety risks and innovation benefits from advances at the frontier of AI development.
Submitted 7 November, 2023; v1 submitted 6 July, 2023; originally announced July 2023.
Comments: Update July 11th: added missing footnote back in; adjusted author order (mistakenly non-alphabetical among the first 6 authors) and adjusted affiliations (Jess Whittlestone's affiliation was mistagged and Gillian Hadfield had SRI added to her affiliations). Updated September 4th: various typos.

5. arXiv:2305.15324 [pdf, other] - cs.AI (Artificial Intelligence)
Title: Model evaluation for extreme risks
Authors: Toby Shevlane, Sebastian Farquhar, Ben Garfinkel, Mary Phuong, Jess Whittlestone, Jade Leung, Daniel Kokotajlo, Nahema Marchal, Markus Anderljung, Noam Kolt, Lewis Ho, Divya Siddarth, Shahar Avin, Will Hawkins, Been Kim, Iason Gabriel, Vijay Bolina, Jack Clark, Yoshua Bengio, Paul Christiano, Allan Dafoe
Abstract: Current approaches to building general-purpose AI systems tend to produce systems with both beneficial and harmful capabilities. Further progress in AI development could lead to capabilities that pose extreme risks, such as offensive cyber capabilities or strong manipulation skills. We explain why model evaluation is critical for addressing extreme risks. Developers must be able to identify dangerous capabilities (through "dangerous capability evaluations") and the propensity of models to apply their capabilities for harm (through "alignment evaluations"). These evaluations will become critical for keeping policymakers and other stakeholders informed, and for making responsible decisions about model training, deployment, and security.
Submitted 22 September, 2023; v1 submitted 24 May, 2023; originally announced May 2023.
Comments: Fixed typos; added citation
ACM Class: K.4.1

6. arXiv:2303.12642 [pdf] - cs.AI (Artificial Intelligence); cs.CY (Computers and Society); cs.LG (Machine Learning)
Title: Democratising AI: Multiple Meanings, Goals, and Methods
Authors: Elizabeth Seger, Aviv Ovadya, Ben Garfinkel, Divya Siddarth, Allan Dafoe
Abstract: Numerous parties are calling for the democratisation of AI, but the phrase is used to refer to a variety of goals, the pursuit of which sometimes conflict. This paper identifies four kinds of AI democratisation that are commonly discussed: (1) the democratisation of AI use, (2) the democratisation of AI development, (3) the democratisation of AI profits, and (4) the democratisation of AI governance. Numerous goals and methods of achieving each form of democratisation are discussed. The main takeaway from this paper is that AI democratisation is a multifarious and sometimes conflicting concept that should not be conflated with improving AI accessibility. If we want to move beyond ambiguous commitments to democratising AI, to productive discussions of concrete policies and trade-offs, then we need to recognise the principal role of the democratisation of AI governance in navigating tradeoffs and risks across decisions around use, development, and profits.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.12642v3-abstract-full').style.display = 'none'; document.getElementById('2303.12642v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">V2 Changed second author affiliation; added citation to section 5.2; edit to author contribution statement; V3 camera ready version for conference proceedings. Minor content changes in response to reviewer comments</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.11074">arXiv:2303.11074</a> <span> [<a href="https://arxiv.org/pdf/2303.11074">pdf</a>, <a href="https://arxiv.org/ps/2303.11074">ps</a>, <a href="https://arxiv.org/format/2303.11074">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Generative AI and the Digital Commons </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+S">Saffron Huang</a>, <a href="/search/cs?searchtype=author&query=Siddarth%2C+D">Divya Siddarth</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.11074v1-abstract-short" style="display: inline;"> Many generative foundation models (or GFMs) are trained on publicly available data and use public infrastructure, but 1) may degrade the "digital commons" that they depend on, and 2) do not have processes in place to return value captured to data producers and stakeholders. Existing conceptions of data rights and protection (focusing largely on individually-owned data and associated privacy concer… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11074v1-abstract-full').style.display = 'inline'; document.getElementById('2303.11074v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.11074v1-abstract-full" style="display: none;"> Many generative foundation models (or GFMs) are trained on publicly available data and use public infrastructure, but 1) may degrade the "digital commons" that they depend on, and 2) do not have processes in place to return value captured to data producers and stakeholders. Existing conceptions of data rights and protection (focusing largely on individually-owned data and associated privacy concerns) and copyright or licensing-based models offer some instructive priors, but are ill-suited for the issues that may arise from models trained on commons-based data. 
We outline the risks posed by GFMs and why they are relevant to the digital commons, and propose numerous governance-based solutions that include investments in standardized dataset/model disclosure and other kinds of transparency when it comes to generative models' training and capabilities, consortia-based funding for monitoring/standards/auditing organizations, requirements or norms for GFM companies to contribute high quality data to the commons, and structures for shared ownership based on individual or community provision of fine-tuning data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11074v1-abstract-full').style.display = 'none'; document.getElementById('2303.11074v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.13515">arXiv:2105.13515</a> <span> [<a href="https://arxiv.org/pdf/2105.13515">pdf</a>, <a href="https://arxiv.org/ps/2105.13515">ps</a>, <a href="https://arxiv.org/format/2105.13515">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Vaccine Credential Technology Principles </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Siddarth%2C+D">Divya Siddarth</a>, <a href="/search/cs?searchtype=author&query=Hart%2C+V">Vi Hart</a>, <a href="/search/cs?searchtype=author&query=Cantrell%2C+B">Bethan Cantrell</a>, <a href="/search/cs?searchtype=author&query=Yasuda%2C+K">Kristina Yasuda</a>, <a href="/search/cs?searchtype=author&query=Mandel%2C+J">Josh Mandel</a>, <a href="/search/cs?searchtype=author&query=Easterbrook%2C+K">Karen Easterbrook</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.13515v1-abstract-short" style="display: inline;"> The historically rapid development of effective COVID-19 vaccines has policymakers facing evergreen public health questions regarding vaccination records and verification. Governments and institutions around the world are already taking action on digital vaccine certificates, including guidance and recommendations from the European Commission, the WHO, and the Biden Administration. These could be… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.13515v1-abstract-full').style.display = 'inline'; document.getElementById('2105.13515v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.13515v1-abstract-full" style="display: none;"> The historically rapid development of effective COVID-19 vaccines has policymakers facing evergreen public health questions regarding vaccination records and verification. Governments and institutions around the world are already taking action on digital vaccine certificates, including guidance and recommendations from the European Commission, the WHO, and the Biden Administration. 
These could be encouraging efforts: an effective system for vaccine certificates could potentially be part of a safe return to work, travel, and daily life, and a secure technological implementation could improve on existing systems to prioritize privacy, streamline access, and build for necessary interoperability across countries and contexts. However, vaccine credentials are not without potential harms, and, particularly given major inequities in vaccine access and rollout, there are valid concerns that they may be used in ineffective or exclusionary ways that exacerbate inequality, allow for discrimination, violate privacy, and assume consent. While the present moment calls for urgency, we must also acknowledge that choices made in the vaccine credentialing rollout for COVID-19 are likely to have long-term implications, and must be made with care. In this paper, we outline potential implementation and ethical concerns that may arise from tech-enabled vaccine credentialing programs now and in the future, and discuss the technological tradeoffs implicated in these concerns. We suggest a set of principles that, if adopted, may mitigate these concerns, forestall preventable harms, and point the way forward; the paper is structured as a deep dive into each of these principles. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.13515v1-abstract-full').style.display = 'none'; document.getElementById('2105.13515v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.05300">arXiv:2008.05300</a> <span> [<a href="https://arxiv.org/pdf/2008.05300">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Who Watches the Watchmen? A Review of Subjective Approaches for Sybil-resistance in Proof of Personhood Protocols </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Siddarth%2C+D">Divya Siddarth</a>, <a href="/search/cs?searchtype=author&query=Ivliev%2C+S">Sergey Ivliev</a>, <a href="/search/cs?searchtype=author&query=Siri%2C+S">Santiago Siri</a>, <a href="/search/cs?searchtype=author&query=Berman%2C+P">Paula Berman</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.05300v5-abstract-short" style="display: inline;"> Most current self-sovereign identity systems may be categorized as strictly objective, consisting of cryptographically signed statements issued by trusted third party attestors. This failure to provide an input for subjectivity accounts for a central challenge: the inability to address the question of "Who verifies the verifier?". 
Instead, these protocols outsource their legitimacy to mechanisms b… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.05300v5-abstract-full').style.display = 'inline'; document.getElementById('2008.05300v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.05300v5-abstract-full" style="display: none;"> Most current self-sovereign identity systems may be categorized as strictly objective, consisting of cryptographically signed statements issued by trusted third party attestors. This failure to provide an input for subjectivity accounts for a central challenge: the inability to address the question of "Who verifies the verifier?". Instead, these protocols outsource their legitimacy to mechanisms beyond their internal structure, relying on traditional centralized institutions such as national ID issuers and KYC providers to verify the claims they hold. This reliance has been employed to safeguard applications from a vulnerability previously thought to be impossible to address in distributed systems: the Sybil attack problem, which describes the abuse of an online system by creating many illegitimate virtual personas. Inspired by the progress in cryptocurrencies and blockchain technology, there has recently been a surge in networked protocols that make use of subjective inputs such as voting, vouching, and interpreting, to arrive at a decentralized and sybil-resistant consensus for identity. In this article, we will outline the approaches of these new and natively digital sources of authentication -- their attributes, methodologies strengths, and weaknesses -- and sketch out possible directions for future developments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.05300v5-abstract-full').style.display = 'none'; document.getElementById('2008.05300v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.03263">arXiv:2008.03263</a> <span> [<a href="https://arxiv.org/pdf/2008.03263">pdf</a>, <a href="https://arxiv.org/format/2008.03263">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> COVID, BLM, and the polarization of US politicians on Twitter </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Panda%2C+A">Anmol Panda</a>, <a href="/search/cs?searchtype=author&query=Siddarth%2C+D">Divya Siddarth</a>, <a href="/search/cs?searchtype=author&query=Pal%2C+J">Joyojeet Pal</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.03263v1-abstract-short" style="display: inline;"> We mapped the tweets of 520 US Congress members, focusing on analyzing their engagement with two broad topics: first, the COVID-19 pandemic, and second, the recent wave of anti-racist protest. We find that, in discussing COVID-19, Democrats frame the issue in terms of public health, while Republicans are more likely to focus on small businesses and the economy. When looking at the discourse around… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.03263v1-abstract-full').style.display = 'inline'; document.getElementById('2008.03263v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.03263v1-abstract-full" style="display: none;"> We mapped the tweets of 520 US Congress members, focusing on analyzing their engagement with two broad topics: first, the COVID-19 pandemic, and second, the recent wave of anti-racist protest. We find that, in discussing COVID-19, Democrats frame the issue in terms of public health, while Republicans are more likely to focus on small businesses and the economy. When looking at the discourse around anti-Black violence, we find that Democrats are far more likely to name police brutality as a specific concern. In contrast, Republicans not only discuss the issue far less, but also keep their terms more general, as well as criticizing perceived protest violence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.03263v1-abstract-full').style.display = 'none'; document.getElementById('2008.03263v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 6 figures</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 