Existential risk from artificial intelligence

<div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q21715237" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"><span class="mw-redirectedfrom">(Redirected from <a href="/w/index.php?title=Existential_risk_from_artificial_general_intelligence&amp;redirect=no" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk from artificial general intelligence</a>)</span></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Hypothesized risk to human existence</div> <p class="mw-empty-elt"> </p> <style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output .hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist 
li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" "counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style data-mw-deduplicate="TemplateStyles:r1246091330">.mw-parser-output .sidebar{width:22em;float:right;clear:right;margin:0.5em 0 1em 1em;background:var(--background-color-neutral-subtle,#f8f9fa);border:1px solid var(--border-color-base,#a2a9b1);padding:0.2em;text-align:center;line-height:1.4em;font-size:88%;border-collapse:collapse;display:table}body.skin-minerva .mw-parser-output .sidebar{display:table!important;float:right!important;margin:0.5em 0 1em 1em!important}.mw-parser-output .sidebar-subgroup{width:100%;margin:0;border-spacing:0}.mw-parser-output .sidebar-left{float:left;clear:left;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-none{float:none;clear:both;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-outer-title{padding:0 0.4em 0.2em;font-size:125%;line-height:1.2em;font-weight:bold}.mw-parser-output .sidebar-top-image{padding:0.4em}.mw-parser-output .sidebar-top-caption,.mw-parser-output .sidebar-pretitle-with-top-image,.mw-parser-output .sidebar-caption{padding:0.2em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-pretitle{padding:0.4em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-title,.mw-parser-output .sidebar-title-with-pretitle{padding:0.2em 0.8em;font-size:145%;line-height:1.2em}.mw-parser-output .sidebar-title-with-pretitle{padding:0.1em 0.4em}.mw-parser-output .sidebar-image{padding:0.2em 0.4em 0.4em}.mw-parser-output .sidebar-heading{padding:0.1em 0.4em}.mw-parser-output .sidebar-content{padding:0 0.5em 0.4em}.mw-parser-output .sidebar-content-with-subgroup{padding:0.1em 0.4em 0.2em}.mw-parser-output .sidebar-above,.mw-parser-output .sidebar-below{padding:0.3em 0.8em;font-weight:bold}.mw-parser-output .sidebar-collapse .sidebar-above,.mw-parser-output .sidebar-collapse .sidebar-below{border-top:1px solid #aaa;border-bottom:1px solid #aaa}.mw-parser-output .sidebar-navbar{text-align:right;font-size:115%;padding:0 0.4em 0.4em}.mw-parser-output .sidebar-list-title{padding:0 
0.4em;text-align:left;font-weight:bold;line-height:1.6em;font-size:105%}.mw-parser-output .sidebar-list-title-c{padding:0 0.4em;text-align:center;margin:0 3.3em}@media(max-width:640px){body.mediawiki .mw-parser-output .sidebar{width:100%!important;clear:both;float:none!important;margin-left:0!important;margin-right:0!important}}body.skin--responsive .mw-parser-output .sidebar a>img{max-width:none!important}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media print{body.ns-0 .mw-parser-output .sidebar{display:none!important}}</style><table class="sidebar sidebar-collapse nomobile nowraplinks hlist"><tbody><tr><td class="sidebar-pretitle">Part of a series on</td></tr><tr><th class="sidebar-title-with-pretitle"><a href="/wiki/Artificial_intelligence" title="Artificial intelligence">Artificial intelligence</a></th></tr><tr><td class="sidebar-image"><figure class="mw-halign-center" typeof="mw:File"><a href="/wiki/File:Dall-e_3_(jan_%2724)_artificial_intelligence_icon.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/100px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png" decoding="async" width="100" height="100" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/150px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/200px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png 2x" data-file-width="820" data-file-height="820" /></a><figcaption></figcaption></figure></td></tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Artificial_intelligence#Goals" title="Artificial intelligence">Major goals</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Artificial_general_intelligence" title="Artificial general intelligence">Artificial general intelligence</a></li> <li><a href="/wiki/Intelligent_agent" title="Intelligent agent">Intelligent agent</a></li> <li><a href="/wiki/Recursive_self-improvement" title="Recursive self-improvement">Recursive self-improvement</a></li> <li><a href="/wiki/Automated_planning_and_scheduling" title="Automated planning and scheduling">Planning</a></li> <li><a href="/wiki/Computer_vision" title="Computer vision">Computer vision</a></li> <li><a href="/wiki/General_game_playing" title="General game playing">General game playing</a></li> <li><a href="/wiki/Knowledge_representation_and_reasoning" title="Knowledge representation and 
reasoning">Knowledge reasoning</a></li> <li><a href="/wiki/Natural_language_processing" title="Natural language processing">Natural language processing</a></li> <li><a href="/wiki/Robotics" title="Robotics">Robotics</a></li> <li><a href="/wiki/AI_safety" title="AI safety">AI safety</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)">Approaches</div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Machine_learning" title="Machine learning">Machine learning</a></li> <li><a href="/wiki/Symbolic_artificial_intelligence" title="Symbolic artificial intelligence">Symbolic</a></li> <li><a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a></li> <li><a href="/wiki/Bayesian_network" title="Bayesian network">Bayesian networks</a></li> <li><a href="/wiki/Evolutionary_algorithm" title="Evolutionary algorithm">Evolutionary algorithms</a></li> <li><a href="/wiki/Hybrid_intelligent_system" title="Hybrid intelligent system">Hybrid intelligent systems</a></li> <li><a href="/wiki/Artificial_intelligence_systems_integration" title="Artificial intelligence systems integration">Systems integration</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Applications_of_artificial_intelligence" title="Applications of artificial intelligence">Applications</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Machine_learning_in_bioinformatics" title="Machine learning in bioinformatics">Bioinformatics</a></li> <li><a href="/wiki/Deepfake" title="Deepfake">Deepfake</a></li> <li><a href="/wiki/Machine_learning_in_earth_sciences" title="Machine learning in earth sciences">Earth sciences</a></li> <li><a href="/wiki/Applications_of_artificial_intelligence#Finance" title="Applications of artificial intelligence"> Finance </a></li> <li><a href="/wiki/Generative_artificial_intelligence" title="Generative artificial intelligence">Generative AI</a> <ul><li><a href="/wiki/Artificial_intelligence_art" title="Artificial intelligence art">Art</a></li> <li><a href="/wiki/Generative_audio" title="Generative audio">Audio</a></li> <li><a href="/wiki/Music_and_artificial_intelligence" title="Music and artificial intelligence">Music</a></li></ul></li> <li><a href="/wiki/Artificial_intelligence_in_government" title="Artificial intelligence in government">Government</a></li> <li><a href="/wiki/Artificial_intelligence_in_healthcare" title="Artificial intelligence in healthcare">Healthcare</a> <ul><li><a href="/wiki/Artificial_intelligence_in_mental_health" title="Artificial intelligence in mental health">Mental health</a></li></ul></li> <li><a href="/wiki/Artificial_intelligence_in_industry" title="Artificial intelligence in industry">Industry</a></li> <li><a href="/wiki/Machine_translation" title="Machine translation">Translation</a></li> <li><a href="/wiki/Artificial_intelligence_arms_race" title="Artificial intelligence arms race"> Military </a></li> <li><a href="/wiki/Machine_learning_in_physics" title="Machine learning in physics">Physics</a></li> <li><a href="/wiki/List_of_artificial_intelligence_projects" title="List of artificial intelligence projects">Projects</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div 
class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Philosophy_of_artificial_intelligence" title="Philosophy of artificial intelligence">Philosophy</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Artificial_consciousness" title="Artificial consciousness">Artificial consciousness</a></li> <li><a href="/wiki/Chinese_room" title="Chinese room">Chinese room</a></li> <li><a href="/wiki/Friendly_artificial_intelligence" title="Friendly artificial intelligence">Friendly AI</a></li> <li><a href="/wiki/AI_control_problem" class="mw-redirect" title="AI control problem">Control problem</a>/<a href="/wiki/AI_takeover" title="AI takeover">Takeover</a></li> <li><a href="/wiki/Ethics_of_artificial_intelligence" title="Ethics of artificial intelligence">Ethics</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk</a></li> <li><a href="/wiki/Regulation_of_artificial_intelligence" title="Regulation of artificial intelligence">Regulation</a></li> <li><a href="/wiki/Turing_test" title="Turing test">Turing test</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/History_of_artificial_intelligence" title="History of artificial intelligence">History</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Timeline_of_artificial_intelligence" title="Timeline of artificial intelligence">Timeline</a></li> <li><a href="/wiki/Progress_in_artificial_intelligence" title="Progress in artificial intelligence">Progress</a></li> <li><a href="/wiki/AI_winter" title="AI winter">AI winter</a></li> <li><a href="/wiki/AI_boom" title="AI boom">AI boom</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)">Glossary</div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Glossary_of_artificial_intelligence" title="Glossary of artificial intelligence">Glossary</a></li></ul></div></div></td> </tr><tr><td class="sidebar-navbar"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:"[ "}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:" ]"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}html.skin-theme-clientpref-night .mw-parser-output .navbar li a 
Existential risk from artificial intelligence refers to the idea that substantial progress in artificial general intelligence (AGI) could lead to human extinction or an irreversible global catastrophe.[1][2][3][4]

One argument for the importance of this risk notes that human beings dominate other species because the human brain has distinctive capabilities that other animals lack. If AI were to surpass human intelligence and become superintelligent, it might become uncontrollable. Just as the fate of the mountain gorilla depends on human goodwill, the fate of humanity could depend on the actions of a future machine superintelligence.[5]
The plausibility of existential catastrophe due to AI is widely debated. It hinges in part on whether AGI or superintelligence is achievable, the speed at which dangerous capabilities and behaviors emerge,[6] and whether practical scenarios for AI takeovers exist.[7] Concerns about superintelligence have been voiced by computer scientists and tech CEOs such as Geoffrey Hinton,[8] Yoshua Bengio,[9] Alan Turing,[a] Elon Musk,[12] and OpenAI CEO Sam Altman.[13] In 2022, a survey of AI researchers with a 17% response rate found that a majority of respondents believed there is a 10 percent or greater chance that human inability to control AI will cause an existential catastrophe.[14][15] In 2023, hundreds of AI experts and other notable figures signed a statement declaring, "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[16] Following increased concern over AI risks, government leaders such as United Kingdom prime minister Rishi Sunak[17] and United Nations Secretary-General António Guterres[18] called for an increased focus on global AI regulation.
United Nations">United Nations Secretary-General</a> <a href="/wiki/Ant%C3%B3nio_Guterres" title="António Guterres">António Guterres</a><sup id="cite_ref-:12_19-0" class="reference"><a href="#cite_note-:12-19"><span class="cite-bracket">&#91;</span>18<span class="cite-bracket">&#93;</span></a></sup> called for an increased focus on global <a href="/wiki/Regulation_of_artificial_intelligence" title="Regulation of artificial intelligence">AI regulation</a>. </p><p>Two sources of concern stem from the problems of AI <a href="/wiki/AI_capability_control" title="AI capability control">control</a> and <a href="/wiki/AI_alignment" title="AI alignment">alignment</a>. Controlling a superintelligent machine or instilling it with human-compatible values may be difficult. Many researchers believe that a superintelligent machine would likely resist attempts to disable it or change its goals as that would prevent it from accomplishing its present goals. It would be extremely challenging to align a superintelligence with the full breadth of significant human values and constraints.<sup id="cite_ref-aima_1-1" class="reference"><a href="#cite_note-aima-1"><span class="cite-bracket">&#91;</span>1<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-yudkowsky-global-risk_20-0" class="reference"><a href="#cite_note-yudkowsky-global-risk-20"><span class="cite-bracket">&#91;</span>19<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-research-priorities_21-0" class="reference"><a href="#cite_note-research-priorities-21"><span class="cite-bracket">&#91;</span>20<span class="cite-bracket">&#93;</span></a></sup> In contrast, skeptics such as <a href="/wiki/Computer_scientist" title="Computer scientist">computer scientist</a> <a href="/wiki/Yann_LeCun" title="Yann LeCun">Yann LeCun</a> argue that superintelligent machines will have no desire for self-preservation.<sup id="cite_ref-vanity_22-0" class="reference"><a href="#cite_note-vanity-22"><span class="cite-bracket">&#91;</span>21<span class="cite-bracket">&#93;</span></a></sup> </p><p>A third source of concern is the possibility of a sudden "<a href="/wiki/Intelligence_explosion" class="mw-redirect" title="Intelligence explosion">intelligence explosion</a>" that catches humanity unprepared. 
A third source of concern is the possibility of a sudden "intelligence explosion" that catches humanity unprepared. In this scenario, an AI more intelligent than its creators would be able to recursively improve itself at an exponentially increasing rate, improving too quickly for its handlers or society at large to control.[1][19] Empirically, examples like AlphaZero, which taught itself to play Go and quickly surpassed human ability, show that domain-specific AI systems can sometimes progress from subhuman to superhuman ability very quickly, although such machine learning systems do not recursively improve their fundamental architecture.[22]

== History ==

One of the earliest authors to express serious concern that highly advanced machines might pose existential risks to humanity was the novelist Samuel Butler, who wrote in his 1863 essay Darwin among the Machines:[23]

The upshot is simply a question of time, but that the time will come when the machines will hold the real supremacy over the world and its inhabitants is what no person of a truly philosophic mind can for a moment question.

In 1951, foundational computer scientist Alan Turing wrote the article "Intelligent Machinery, A Heretical Theory", in which he proposed that artificial general intelligences would likely "take control" of the world as they became more intelligent than human beings:
Let us now assume, for the sake of argument, that [intelligent] machines are a genuine possibility, and look at the consequences of constructing them... There would be no question of the machines dying, and they would be able to converse with each other to sharpen their wits. At some stage therefore we should have to expect the machines to take control, in the way that is mentioned in Samuel Butler's Erewhon.[24]

In 1965, I. J. Good originated the concept now known as an "intelligence explosion" and said the risks were underappreciated:[25]

Let an ultraintelligent machine be defined as a machine that can far surpass all the intellectual activities of any man however clever. Since the design of machines is one of these intellectual activities, an ultraintelligent machine could design even better machines; there would then unquestionably be an 'intelligence explosion', and the intelligence of man would be left far behind. Thus the first ultraintelligent machine is the last invention that man need ever make, provided that the machine is docile enough to tell us how to keep it under control. It is curious that this point is made so seldom outside of science fiction. It is sometimes worthwhile to take science fiction seriously.[26]
Scholars such as Marvin Minsky[27] and I. J. Good himself[28] occasionally expressed concern that a superintelligence could seize control, but issued no call to action. In 2000, computer scientist and Sun co-founder Bill Joy penned an influential essay, "Why The Future Doesn't Need Us", identifying superintelligent robots as a high-tech danger to human survival, alongside nanotechnology and engineered bioplagues.[29]

Nick Bostrom published Superintelligence in 2014, which presented his arguments that superintelligence poses an existential threat.[30] By 2015, public figures such as physicists Stephen Hawking and Nobel laureate Frank Wilczek, computer scientists Stuart J. Russell and Roman Yampolskiy, and entrepreneurs Elon Musk and Bill Gates were expressing concern about the risks of superintelligence.[31][32][33][34] Also in 2015, the Open Letter on Artificial Intelligence highlighted the "great potential of AI" and encouraged more research on how to make it robust and beneficial.[35] In April 2016, the journal Nature warned: "Machines and robots that outperform humans across the board could self-improve beyond our control—and their interests might not align with ours".[36] In 2020, Brian Christian published The Alignment Problem, which details the history of progress on AI alignment up to that time.[37][38]
In March 2023, key figures in AI, such as Musk, signed a letter from the Future of Life Institute calling for a halt to advanced AI training until it could be properly regulated.[39] In May 2023, the Center for AI Safety released a statement signed by numerous experts in AI safety and AI existential risk, which stated: "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war."[40][41]

== Potential AI capabilities ==

=== General Intelligence ===

Artificial general intelligence (AGI) is typically defined as a system that performs at least as well as humans in most or all intellectual tasks.[42] A 2022 survey of AI researchers found that 90% of respondents expected AGI to be achieved within the next 100 years, and half expected it by 2061.[43] Meanwhile, some researchers dismiss existential risks from AGI as "science fiction", based on their high confidence that AGI will not be created anytime soon.[44]
Notably, <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a> said in 2023 that he recently changed his estimate from "20 to 50 years before we have general purpose A.I." to "20 years or less".<sup id="cite_ref-46" class="reference"><a href="#cite_note-46"><span class="cite-bracket">&#91;</span>45<span class="cite-bracket">&#93;</span></a></sup> </p><p>The <a href="/wiki/Frontier_(supercomputer)" title="Frontier (supercomputer)">Frontier supercomputer</a> at <a href="/wiki/Oak_Ridge_National_Laboratory" title="Oak Ridge National Laboratory">Oak Ridge National Laboratory</a> turned out to be nearly eight times faster than expected. Feiyi Wang, a researcher there, said "We didn't expect this capability" and "we're approaching the point where we could actually simulate the human brain".<sup id="cite_ref-47" class="reference"><a href="#cite_note-47"><span class="cite-bracket">&#91;</span>46<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Superintelligence">Superintelligence</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=4" title="Edit section: Superintelligence"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In contrast with AGI, Bostrom defines a <a href="/wiki/Superintelligence" title="Superintelligence">superintelligence</a> as "any intellect that greatly exceeds the cognitive performance of humans in virtually all domains of interest", including scientific creativity, strategic planning, and social skills.<sup id="cite_ref-48" class="reference"><a href="#cite_note-48"><span class="cite-bracket">&#91;</span>47<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-superintelligence_5-1" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> He argues that a superintelligence can outmaneuver humans anytime its goals conflict with humans'. It may choose to hide its true intent until humanity cannot stop it.<sup id="cite_ref-economist_review3_49-0" class="reference"><a href="#cite_note-economist_review3-49"><span class="cite-bracket">&#91;</span>48<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-superintelligence_5-2" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> Bostrom writes that in order to be safe for humanity, a superintelligence must be aligned with human values and morality, so that it is "fundamentally on our side".<sup id="cite_ref-:11_50-0" class="reference"><a href="#cite_note-:11-50"><span class="cite-bracket">&#91;</span>49<span class="cite-bracket">&#93;</span></a></sup> </p><p><a href="/wiki/Stephen_Hawking" title="Stephen Hawking">Stephen Hawking</a> argued that superintelligence is physically possible because "there is no physical law precluding particles from being organised in ways that perform even more advanced computations than the arrangements of particles in human brains".<sup id="cite_ref-hawking_editorial_33-1" class="reference"><a href="#cite_note-hawking_editorial-33"><span class="cite-bracket">&#91;</span>32<span class="cite-bracket">&#93;</span></a></sup> </p><p>When artificial superintelligence (ASI) may be achieved, if ever, is necessarily less certain than predictions for AGI. 
In 2023, <a href="/wiki/OpenAI" title="OpenAI">OpenAI</a> leaders said that not only AGI, but superintelligence may be achieved in less than 10 years.<sup id="cite_ref-51" class="reference"><a href="#cite_note-51"><span class="cite-bracket">&#91;</span>50<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Comparison_with_humans">Comparison with humans</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=5" title="Edit section: Comparison with humans"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Bostrom argues that AI has many advantages over the <a href="/wiki/Human_brain" title="Human brain">human brain</a>:<sup id="cite_ref-superintelligence_5-3" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> </p> <ul><li>Speed of computation: biological <a href="/wiki/Neuron" title="Neuron">neurons</a> operate at a maximum frequency of around 200 <a href="/wiki/Hertz" title="Hertz">Hz</a>, compared to potentially multiple GHz for computers.</li> <li>Internal communication speed: <a href="/wiki/Axon" title="Axon">axons</a> transmit signals at up to 120&#160;m/s, while computers transmit signals at the <a href="/wiki/Speed_of_electricity" title="Speed of electricity">speed of electricity</a>, or optically at the <a href="/wiki/Speed_of_light" title="Speed of light">speed of light</a>.</li> <li>Scalability: human intelligence is limited by the size and structure of the brain, and by the efficiency of social communication, while AI may be able to scale by simply adding more hardware.</li> <li>Memory: notably <a href="/wiki/Working_memory" title="Working memory">working memory</a>, because in humans it is limited to a few <a href="/wiki/Chunking_(psychology)" title="Chunking (psychology)">chunks</a> of information at a time.</li> <li>Reliability: transistors are more reliable than biological neurons, enabling higher precision and requiring less redundancy.</li> <li>Duplicability: unlike human brains, AI software and models can be easily <a href="/wiki/File_copying" title="File copying">copied</a>.</li> <li>Editability: the parameters and internal workings of an AI model can easily be modified, unlike the connections in a human brain.</li> <li>Memory sharing and learning: AIs may be able to learn from the experiences of other AIs in a manner more efficient than human learning.</li></ul> <div class="mw-heading mw-heading4"><h4 id="Intelligence_explosion">Intelligence explosion</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=6" title="Edit section: Intelligence explosion"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>According to Bostrom, an AI that has an expert-level facility at certain key software engineering tasks could become a superintelligence due to its capability to recursively improve its own algorithms, even if it is initially limited in other domains not directly relevant to engineering.<sup id="cite_ref-superintelligence_5-4" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-economist_review3_49-1" 
class="reference"><a href="#cite_note-economist_review3-49"><span class="cite-bracket">&#91;</span>48<span class="cite-bracket">&#93;</span></a></sup> This suggests that an intelligence explosion may someday catch humanity unprepared.<sup id="cite_ref-superintelligence_5-5" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> </p><p>The economist <a href="/wiki/Robin_Hanson" title="Robin Hanson">Robin Hanson</a> has said that, to launch an intelligence explosion, an AI must become vastly better at software innovation than the rest of the world combined, which he finds implausible.<sup id="cite_ref-52" class="reference"><a href="#cite_note-52"><span class="cite-bracket">&#91;</span>51<span class="cite-bracket">&#93;</span></a></sup> </p><p>In a "fast takeoff" scenario, the transition from AGI to superintelligence could take days or months. In a "slow takeoff", it could take years or decades, leaving more time for society to prepare.<sup id="cite_ref-53" class="reference"><a href="#cite_note-53"><span class="cite-bracket">&#91;</span>52<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Alien_mind">Alien mind</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=7" title="Edit section: Alien mind"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Superintelligences are sometimes called "alien minds", referring to the idea that their way of thinking and motivations could be vastly different from ours. This is generally considered as a source of risk, making it more difficult to anticipate what a superintelligence might do. It also suggests the possibility that a superintelligence may not particularly value humans by default.<sup id="cite_ref-54" class="reference"><a href="#cite_note-54"><span class="cite-bracket">&#91;</span>53<span class="cite-bracket">&#93;</span></a></sup> To avoid <a href="/wiki/Anthropomorphism" title="Anthropomorphism">anthropomorphism</a>, superintelligence is sometimes viewed as a powerful optimizer that makes the best decisions to achieve its goals.<sup id="cite_ref-superintelligence_5-6" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> </p><p>The field of "mechanistic interpretability" aims to better understand the inner workings of AI models, potentially allowing us one day to detect signs of deception and misalignment.<sup id="cite_ref-55" class="reference"><a href="#cite_note-55"><span class="cite-bracket">&#91;</span>54<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Limits">Limits</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=8" title="Edit section: Limits"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>It has been argued that there are limitations to what intelligence can achieve. 
Notably, the <a href="/wiki/Chaos_theory" title="Chaos theory">chaotic</a> nature or <a href="/wiki/Computational_complexity" title="Computational complexity">time complexity</a> of some systems could fundamentally limit a superintelligence's ability to predict some aspects of the future, increasing its uncertainty.<sup id="cite_ref-56" class="reference"><a href="#cite_note-56"><span class="cite-bracket">&#91;</span>55<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Dangerous_capabilities">Dangerous capabilities</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=9" title="Edit section: Dangerous capabilities"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Advanced AI could generate enhanced pathogens or cyberattacks or manipulate people. These capabilities could be misused by humans,<sup id="cite_ref-:03_57-0" class="reference"><a href="#cite_note-:03-57"><span class="cite-bracket">&#91;</span>56<span class="cite-bracket">&#93;</span></a></sup> or exploited by the AI itself if misaligned.<sup id="cite_ref-superintelligence_5-7" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> A full-blown superintelligence could find various ways to gain a decisive influence if it wanted to,<sup id="cite_ref-superintelligence_5-8" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> but these dangerous capabilities may become available earlier, in weaker and more specialized AI systems. They may cause societal instability and empower malicious actors.<sup id="cite_ref-:03_57-1" class="reference"><a href="#cite_note-:03-57"><span class="cite-bracket">&#91;</span>56<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Social_manipulation">Social manipulation</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=10" title="Edit section: Social manipulation"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Geoffrey Hinton warned that in the short term, the profusion of AI-generated text, images and videos will make it more difficult to figure out the truth, which he says authoritarian states could exploit to manipulate elections.<sup id="cite_ref-58" class="reference"><a href="#cite_note-58"><span class="cite-bracket">&#91;</span>57<span class="cite-bracket">&#93;</span></a></sup> Such large-scale, personalized manipulation capabilities can increase the existential risk of a worldwide "irreversible totalitarian regime". 
=== Dangerous capabilities ===

Advanced AI could generate enhanced pathogens, mount cyberattacks, or manipulate people. These capabilities could be misused by humans[56] or exploited by the AI itself if it is misaligned.[5] A full-blown superintelligence could find various ways to gain a decisive influence if it wanted to,[5] but such dangerous capabilities may become available earlier, in weaker and more specialized AI systems, where they may cause societal instability and empower malicious actors.[56]

==== Social manipulation ====

Geoffrey Hinton warned that in the short term, the profusion of AI-generated text, images, and videos will make it more difficult to figure out the truth, which he says authoritarian states could exploit to manipulate elections.[57] Such large-scale, personalized manipulation capabilities could increase the existential risk of a worldwide "irreversible totalitarian regime", and could also be used by malicious actors to fracture society and make it dysfunctional.[56]

==== Cyberattacks ====

AI-enabled cyberattacks are increasingly considered a present and critical threat. According to NATO's technical director of cyberspace, "The number of attacks is increasing exponentially".[58] AI can also be used defensively, to preemptively find and fix vulnerabilities and to detect threats.[59]

AI could improve the "accessibility, success rate, scale, speed, stealth and potency of cyberattacks", potentially causing "significant geopolitical turbulence" if it facilitates attacks more than defense.[56]

Speculatively, such hacking capabilities could be used by an AI system to break out of its local environment, generate revenue, or acquire cloud computing resources.[60]

==== Enhanced pathogens ====

As AI technology democratizes, it may become easier to engineer more contagious and lethal pathogens. This could enable people with limited skills in synthetic biology to engage in bioterrorism, and dual-use technology that is useful for medicine could be repurposed to create weapons.[56]
For example, in 2022 scientists modified an AI system originally intended for generating non-toxic, therapeutic molecules so that toxicity was rewarded rather than penalized. This simple change enabled the system to create, in six hours, 40,000 candidate molecules for chemical warfare, including known and novel molecules.[56][61]

=== AI arms race ===

Main article: Artificial intelligence arms race

Competition among companies, state actors, and other organizations to develop AI technologies could lead to a race to the bottom in safety standards.[62] Because rigorous safety procedures take time and resources, projects that proceed more carefully risk being out-competed by less scrupulous developers.[63][56]
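The competitive dynamic described above is often summarized as a collective-action problem. The toy two-player game below is one way to illustrate that framing, with made-up payoffs that are not from the cited sources: each developer is better off cutting safety work whatever its rival does, yet both end up worse off than if both had been careful.

<syntaxhighlight lang="python">
# Toy "race to the bottom" game (illustrative payoffs only).
# Each of two AI developers chooses "careful" (invest in safety) or "rush".
# Rushing beats a careful rival, but mutual rushing is worse for both
# than mutual caution.

payoffs = {                       # (row player's payoff, column player's payoff)
    ("careful", "careful"): (3, 3),
    ("careful", "rush"):    (0, 4),
    ("rush",    "careful"): (4, 0),
    ("rush",    "rush"):    (1, 1),
}
actions = ["careful", "rush"]

def best_response(opponent_action: str, player: int) -> str:
    """Best action for `player` (0 = row, 1 = column) against a fixed opponent action."""
    def payoff(action: str) -> int:
        pair = (action, opponent_action) if player == 0 else (opponent_action, action)
        return payoffs[pair][player]
    return max(actions, key=payoff)

equilibria = [(r, c) for r in actions for c in actions
              if best_response(c, 0) == r and best_response(r, 1) == c]
print("Nash equilibria:", equilibria)   # -> [('rush', 'rush')]
</syntaxhighlight>

With these payoffs the only equilibrium is mutual rushing, even though mutual caution pays more to both players. Changing the payoffs changes the conclusion; the sketch only formalizes the incentive problem described above.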
AI could be used to gain military advantages via lethal autonomous weapons, cyberwarfare, or automated decision-making.[56] As an example of autonomous lethal weapons, miniaturized drones could facilitate low-cost assassination of military or civilian targets, a scenario highlighted in the 2017 short film Slaughterbots.[64] AI could also be used to gain an edge in decision-making by analyzing large amounts of data and making decisions more quickly and effectively than humans, which could increase the speed and unpredictability of war, especially when accounting for automated retaliation systems.[56][65]

== Types of existential risk ==

Main article: Existential risk studies

[Figure: scope–severity grid from Bostrom's paper "Existential Risk Prevention as Global Priority"[66]]

An existential risk is "one that threatens the premature extinction of Earth-originating intelligent life or the permanent and drastic destruction of its potential for desirable future development".[67]

Besides extinction risk, there is the risk that civilization becomes permanently locked into a flawed future. One example is "value lock-in": if humanity still has moral blind spots analogous to slavery in the past, AI might irreversibly entrench them, preventing moral progress. AI could also be used to spread and preserve the set of values of whoever develops it.[68] AI could further facilitate large-scale surveillance and indoctrination, which could be used to create a stable, repressive, worldwide totalitarian regime.[69]
Decisive risks encompass the potential for abrupt and catastrophic events resulting from the emergence of superintelligent AI systems that exceed human intelligence, which could ultimately lead to human extinction. In contrast, accumulative risks emerge gradually through a series of interconnected disruptions that erode societal structures and resilience over time, ultimately leading to a critical failure or collapse.[70][71]

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient, and to what degree. But if sentient machines are created at scale in the future, a civilizational path that indefinitely neglects their welfare could be an existential catastrophe.[72][73] Moreover, it may be possible to engineer digital minds that can feel much more happiness than humans while using fewer resources, called "super-beneficiaries". Such an opportunity raises the question of how to share the world and which "ethical and political framework" would enable a mutually beneficial coexistence between biological and digital minds.[74]

AI may also drastically improve humanity's future. Toby Ord considers the existential risk a reason for "proceeding with due caution", not for abandoning AI.[69] Max More calls AI an "existential opportunity", highlighting the cost of not developing it.[75]

According to Bostrom, superintelligence could help reduce the existential risk from other powerful technologies such as molecular nanotechnology or synthetic biology.
It is thus conceivable that developing superintelligence before other dangerous technologies would reduce the overall existential risk.[5]

AI alignment

Further information: AI alignment

The alignment problem is the research problem of how to reliably assign objectives, preferences or ethical principles to AIs.

Instrumental convergence

Further information: Instrumental convergence

An "instrumental" goal is a sub-goal that helps to achieve an agent's ultimate goal. "Instrumental convergence" refers to the fact that some sub-goals are useful for achieving virtually any ultimate goal, such as acquiring resources or self-preservation.[76] Bostrom argues that if an advanced AI's instrumental goals conflict with humanity's goals, the AI might harm humanity in order to acquire more resources or prevent itself from being shut down, but only as a way to achieve its ultimate goal.[5] Russell argues that a sufficiently advanced machine "will have self-preservation even if you don't program it in... if you say, 'Fetch the coffee', it can't fetch the coffee if it's dead.
So if you give it any goal whatsoever, it has a reason to preserve its own existence to achieve that goal."[21][77]

Resistance to changing goals

Even if current goal-based AI programs are not intelligent enough to think of resisting programmer attempts to modify their goal structures, a sufficiently advanced AI might resist any attempts to change its goal structure, just as a pacifist would not want to take a pill that makes them want to kill people. If the AI were superintelligent, it would likely succeed in out-maneuvering its human operators and prevent itself from being "turned off" or reprogrammed with a new goal.[5][78] This is particularly relevant to value lock-in scenarios. The field of "corrigibility" studies how to make agents that will not resist attempts to change their goals.[79]

Difficulty of specifying goals

In the "intelligent agent" model, an AI can loosely be viewed as a machine that chooses whatever action appears to best achieve its set of goals, or "utility function". A utility function gives each possible situation a score that indicates its desirability to the agent. Researchers know how to write utility functions that mean "minimize the average network latency in this specific telecommunications model" or "maximize the number of reward clicks", but do not know how to write a utility function for "maximize human flourishing"; nor is it clear whether such a function meaningfully and unambiguously exists.
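The following toy Python sketch illustrates this agent model in a hypothetical network-management setting; the action names, predicted effects, and latency-based utility function are invented for illustration and are not drawn from the cited sources:

```python
# Toy sketch of the "intelligent agent" model: score each candidate action's
# predicted outcome with a utility function and pick the highest-scoring one.

def latency_utility(state):
    """Easily specifiable objective: lower average network latency is better."""
    return -state["avg_latency_ms"]

def choose_action(actions, predict, utility):
    """Return the action whose predicted resulting state scores highest."""
    return max(actions, key=lambda action: utility(predict(action)))

# Hypothetical actions and a hypothetical model of their predicted effects.
predicted_effects = {
    "add_cache_server": {"avg_latency_ms": 40},
    "reroute_traffic": {"avg_latency_ms": 55},
    "do_nothing": {"avg_latency_ms": 70},
}

best = choose_action(predicted_effects.keys(), predicted_effects.get, latency_utility)
print(best)  # -> add_cache_server

# By contrast, nobody knows how to write a function such as
#   def human_flourishing_utility(world_state): ...
# that would faithfully score world-states by how well humanity is doing.
```

The first objective is easy to encode because the relevant quantity is directly measurable; the difficulty described above arises because no analogous, unambiguous measure is known for "human flourishing".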
Furthermore, a utility function that expresses some values but not others will tend to trample over the values the function does not reflect.[80][81]

An additional source of concern is that AI "must reason about what people intend rather than carrying out commands literally", and that it must be able to fluidly solicit human guidance if it is too uncertain about what humans want.[82]

Alignment of superintelligences

Some researchers believe the alignment problem may be particularly difficult when applied to superintelligences. Their reasoning includes:

- As AI systems increase in capabilities, the potential dangers associated with experimentation grow. This makes iterative, empirical approaches increasingly risky.[5][83]
- If instrumental goal convergence occurs, it may only do so in sufficiently intelligent agents.[84]
- A superintelligence may find unconventional and radical solutions to assigned goals.
Bostrom gives the example that if the objective is to make humans smile, a weak AI may perform as intended, while a superintelligence may decide a better solution is to "take control of the world and stick electrodes into the facial muscles of humans to cause constant, beaming grins."[49]
- A superintelligence under development could gain some awareness of what it is, where it is in development (training, testing, deployment, etc.), and how it is being monitored, and use this information to deceive its handlers.[85] Bostrom writes that such an AI could feign alignment to prevent human interference until it achieves a "decisive strategic advantage" that allows it to take control.[5]
- Analyzing the internals and interpreting the behavior of current large language models is difficult, and it could be even more difficult for larger and more intelligent models.[83]

Alternatively, some find reason to believe superintelligences would be better able to understand morality, human values, and complex goals. Bostrom writes, "A future superintelligence occupies an epistemically superior vantage point: its beliefs are (probably, on most topics) more likely than ours to be true".[5]

In 2023, OpenAI started a project called "Superalignment" to solve the alignment of superintelligences in four years. It called this an especially important challenge, as it said superintelligence may be achieved within a decade.
Its strategy involves automating alignment research using artificial intelligence.[86]

Difficulty of making a flawless design

Artificial Intelligence: A Modern Approach, a widely used undergraduate AI textbook,[87][88] says that superintelligence "might mean the end of the human race".[1] It states: "Almost any technology has the potential to cause harm in the wrong hands, but with [superintelligence], we have the new problem that the wrong hands might belong to the technology itself."[1] Even if the system designers have good intentions, two difficulties are common to both AI and non-AI computer systems:[1]

- The system's implementation may contain initially unnoticed but subsequently catastrophic bugs. An analogy is space probes: despite the knowledge that bugs in expensive space probes are hard to fix after launch, engineers have historically not been able to prevent catastrophic bugs from occurring.[89][90]
- No matter how much time is put into pre-deployment design, a system's specifications often result in unintended behavior the first time it encounters a new scenario.
For example, Microsoft's Tay behaved inoffensively during pre-deployment testing, but was too easily baited into offensive behavior when it interacted with real users.[21]

AI systems uniquely add a third problem: that even given "correct" requirements, bug-free implementation, and initial good behavior, an AI system's dynamic learning capabilities may cause it to develop unintended behavior, even without unanticipated external scenarios. An AI may partly botch an attempt to design a new generation of itself and accidentally create a successor AI that is more powerful than itself but that no longer maintains the human-compatible moral values preprogrammed into the original AI. For a self-improving AI to be completely safe, it would need not only to be bug-free, but to be able to design successor systems that are also bug-free.[1][91]

Orthogonality thesis

Some skeptics, such as Timothy B. Lee of Vox, argue that any superintelligent program we create will be subservient to us, that the superintelligence will (as it grows more intelligent and learns more facts about the world) spontaneously learn moral truth compatible with our values and adjust its goals accordingly, or that we are either intrinsically or convergently valuable from the perspective of an artificial intelligence.[92]

Bostrom's "orthogonality thesis" argues instead that, with some technical caveats, almost any level of "intelligence" or "optimization power" can be combined with almost any ultimate goal. If a machine is given the sole purpose of enumerating the decimals of pi, then no moral or ethical rules will stop it from achieving its programmed goal by any means.
The machine may use all available physical and informational resources to find as many decimals of pi as it can.[93] Bostrom warns against anthropomorphism: a human will set out to accomplish their projects in a manner that they consider reasonable, while an artificial intelligence may hold no regard for its existence or for the welfare of humans around it, instead caring only about completing the task.[94]

Stuart Armstrong argues that the orthogonality thesis follows logically from the philosophical "is-ought distinction" argument against moral realism. He claims that even if there are moral facts provable by any "rational" agent, the orthogonality thesis still holds: it is still possible to create a non-philosophical "optimizing machine" that can strive toward some narrow goal but that has no incentive to discover any "moral facts" such as those that could get in the way of goal completion. Another argument he makes is that any fundamentally friendly AI could be made unfriendly with modifications as simple as negating its utility function. Armstrong further argues that if the orthogonality thesis is false, there must be some immoral goals that AIs can never achieve, which he finds implausible.[95]

Skeptic Michael Chorost explicitly rejects Bostrom's orthogonality thesis, arguing that "by the time [the AI] is in a position to imagine tiling the Earth with solar panels, it'll know that it would be morally wrong to do so."[96] Chorost argues that "an A.I. will need to desire certain states and dislike others. Today's software lacks that ability—and computer scientists have not a clue how to get it there. Without wanting, there's no impetus to do anything. Today's computers can't even want to keep existing, let alone tile the world in solar panels."[96]

Anthropomorphic arguments

Anthropomorphic arguments assume that, as machines become more intelligent, they will begin to display many human traits, such as morality or a thirst for power.
Although anthropomorphic scenarios are common in fiction, most scholars writing about the existential risk of artificial intelligence reject them.[19] Instead, advanced AI systems are typically modeled as intelligent agents.

The academic debate is between those who worry that AI might threaten humanity and those who believe it would not. Both sides of this debate have framed the other side's arguments as illogical anthropomorphism.[19] Those skeptical of AGI risk accuse their opponents of anthropomorphism for assuming that an AGI would naturally desire power; those concerned about AGI risk accuse skeptics of anthropomorphism for believing an AGI would naturally value or infer human ethical norms.[19][97]

Evolutionary psychologist Steven Pinker, a skeptic, argues that "AI dystopias project a parochial alpha-male psychology onto the concept of intelligence. They assume that superhumanly intelligent robots would develop goals like deposing their masters or taking over the world"; perhaps instead "artificial intelligence will naturally develop along female lines: fully capable of solving problems, but with no desire to annihilate innocents or dominate the civilization."[98] Facebook's director of AI research, Yann LeCun, has said: "Humans have all kinds of drives that make them do bad things to each other, like the self-preservation instinct...
Those drives are programmed into our brain but there is absolutely no reason to build robots that have the same kind of drives".[99]

Despite other differences, the x-risk school[b] agrees with Pinker that an advanced AI would not destroy humanity out of emotion such as revenge or anger, that questions of consciousness are not relevant to assess the risk,[100] and that computer systems do not generally have a computational equivalent of testosterone.[101] They think that power-seeking or self-preservation behaviors emerge in the AI as a way to achieve its true goals, according to the concept of instrumental convergence.

Other sources of risk

See also: Ethics of artificial intelligence, Artificial intelligence arms race, and Global catastrophic risk

Bostrom and others have said that a race to be the first to create AGI could lead to shortcuts in safety, or even to violent conflict.[102][103] Roman Yampolskiy and others warn that a malevolent AGI could be created by design, for example by a military, a government, a sociopath, or a corporation, to benefit from, control, or subjugate certain groups of people, as in cybercrime,[104][105] or that a malevolent AGI could choose the goal of increasing human suffering, for example targeting people who did not assist it during the information explosion phase.[3]:158

Scenarios

Further information: Artificial intelligence in fiction and AI takeover

Some scholars have proposed hypothetical scenarios to illustrate some of their concerns.

Treacherous turn

In Superintelligence, Bostrom expresses concern that even if the timeline for superintelligence turns out to be predictable, researchers might not take sufficient safety precautions, in part because "it could be the case that when dumb, smarter is safe; yet when smart, smarter is more dangerous". He suggests a scenario where, over decades, AI becomes more powerful. Widespread deployment is initially marred by occasional accidents: a driverless bus swerves into the oncoming lane, or a military drone fires into an innocent crowd. Many activists call for tighter oversight and regulation, and some even predict impending catastrophe. But as development continues, the activists are proven wrong. As automotive AI becomes smarter, it suffers fewer accidents; as military robots achieve more precise targeting, they cause less collateral damage. Based on the data, scholars mistakenly infer a broad lesson: the smarter the AI, the safer it is. "And so we boldly go—into the whirling knives", as the superintelligent AI takes a "treacherous turn" and exploits a decisive strategic advantage.[106][5]

Life 3.0

In Max Tegmark's 2017 book Life 3.0, a corporation's "Omega team" creates an extremely powerful AI able to moderately improve its own source code in a number of areas.
After a certain point, the team chooses to publicly downplay the AI's ability in order to avoid regulation or confiscation of the project. For safety, the team keeps the AI in a box where it is mostly unable to communicate with the outside world, and uses it to make money by diverse means, such as Amazon Mechanical Turk tasks, production of animated films and TV shows, and development of biotech drugs, with profits invested back into further improving AI. The team next tasks the AI with astroturfing an army of pseudonymous citizen journalists and commentators in order to gain political influence to use "for the greater good" to prevent wars. The team faces risks that the AI could try to escape by inserting "backdoors" in the systems it designs, by hidden messages in its produced content, or by using its growing understanding of human behavior to persuade someone into letting it free. The team also faces risks that its decision to box the project will delay the project long enough for another project to overtake it.[107][108]

Perspectives

The thesis that AI could pose an existential risk provokes a wide range of reactions in the scientific community and in the public at large, but many of the opposing viewpoints share common ground.
Observers tend to agree that AI has significant potential to improve society.[109][110] The Asilomar AI Principles, which contain only those principles agreed to by 90% of the attendees of the Future of Life Institute's Beneficial AI 2017 conference,[108] also agree in principle that "There being no consensus, we should avoid strong assumptions regarding upper limits on future AI capabilities" and "Advanced AI could represent a profound change in the history of life on Earth, and should be planned for and managed with commensurate care and resources."[111][112]

Conversely, many skeptics agree that ongoing research into the implications of artificial general intelligence is valuable. Skeptic Martin Ford has said: "I think it seems wise to apply something like Dick Cheney's famous '1 Percent Doctrine' to the specter of advanced artificial intelligence: the odds of its occurrence, at least in the foreseeable future, may be very low—but the implications are so dramatic that it should be taken seriously".[113] Similarly, an otherwise skeptical Economist wrote in 2014 that "the implications of introducing a second intelligent species onto Earth are far-reaching enough to deserve hard thinking, even if the prospect seems remote".[48]

AI safety advocates such as Bostrom and Tegmark have criticized the mainstream media's use of "those inane Terminator pictures" to illustrate AI safety concerns: "It can't be much fun to have aspersions cast on one's academic discipline, one's professional community, one's life work ...
I call on all sides to practice patience and restraint, and to engage in direct dialogue and collaboration as much as possible."[108][114] Toby Ord wrote that the idea that an AI takeover requires robots is a misconception, arguing that the ability to spread content through the internet is more dangerous, and that the most destructive people in history stood out by their ability to convince, not their physical strength.[69]

A 2022 expert survey with a 17% response rate gave a median expectation of 5–10% for the possibility of human extinction from artificial intelligence.[15][115]

Endorsement

Further information: Global catastrophic risk

The thesis that AI poses an existential risk, and that this risk needs much more attention than it currently gets, has been endorsed by many computer scientists and public figures, including Alan Turing,[a] the most-cited computer scientist Geoffrey Hinton,[116] Elon Musk,[12] OpenAI CEO Sam Altman,[13][117] Bill Gates, and Stephen Hawking.[117] Endorsers of the thesis sometimes express bafflement at skeptics: Gates says he does not "understand why some people are not concerned",[118] and Hawking criticized widespread indifference in his 2014 editorial:

So, facing possible futures of incalculable benefits and risks, the experts are surely doing everything possible to ensure the best outcome, right? Wrong. If a superior alien civilisation sent us a message saying, 'We'll arrive in a few decades,' would we just reply, 'OK, call us when you get here—we'll leave the lights on?' Probably not—but this is more or less what is happening with AI.[32]

Concern over risk from artificial intelligence has led to some high-profile donations and investments. In 2015, Peter Thiel, Amazon Web Services, Musk, and others jointly committed $1 billion to OpenAI, consisting of a for-profit corporation and the nonprofit parent company, which says it aims to champion responsible AI development.[119] Facebook co-founder Dustin Moskovitz has funded and seeded multiple labs working on AI alignment,[120] notably $5.5 million in 2016 to launch the Centre for Human-Compatible AI led by Professor Stuart Russell.[121] In January 2015, Elon Musk donated $10 million to the Future of Life Institute to fund research on understanding AI decision making. The institute's goal is to "grow wisdom with which we manage" the growing power of technology.
Musk also funds companies developing artificial intelligence, such as DeepMind and Vicarious, to "just keep an eye on what's going on with artificial intelligence",[122] saying "I think there is potentially a dangerous outcome there."[123][124]

In early statements on the topic, Geoffrey Hinton, a major pioneer of deep learning, noted that "there is not a good track record of less intelligent things controlling things of greater intelligence", but said he continued his research because "the prospect of discovery is too sweet".[125][126] In 2023 Hinton quit his job at Google in order to speak out about existential risk from AI. He explained that his increased concern was driven by the possibility that superhuman AI might be closer than he had previously believed, saying: "I thought it was way off. I thought it was 30 to 50 years or even longer away. Obviously, I no longer think that." He also remarked, "Look at how it was five years ago and how it is now. Take the difference and propagate it forwards.
That's scary."[127]

In his 2020 book The Precipice: Existential Risk and the Future of Humanity, Toby Ord, a Senior Research Fellow at Oxford University's Future of Humanity Institute, estimates the total existential risk from unaligned AI over the next 100 years at about one in ten.[69]

Skepticism

Further information: Artificial general intelligence § Feasibility

Baidu Vice President Andrew Ng said in 2015 that AI existential risk is "like worrying about overpopulation on Mars when we have not even set foot on the planet yet."[98][128] For the danger of uncontrolled advanced AI to be realized, the hypothetical AI may have to overpower or outthink any human, which some experts argue is a possibility far enough in the future to not be worth researching.[129][130]

Skeptics who believe AGI is not a short-term possibility often argue that concern about existential risk from AI is unhelpful because it could distract people from more immediate concerns about AI's impact, because it could lead to government regulation or make it more difficult to fund AI research, or because it could damage the field's reputation.[131] AI and AI ethics researchers Timnit Gebru, Emily M. Bender, Margaret Mitchell, and Angelina McMillan-Major have argued that discussion of existential risk distracts from the immediate, ongoing harms from AI taking place today, such as data theft, worker exploitation, bias, and concentration of power.[132] They further note the association between those warning of existential risk and longtermism, which they describe as a "dangerous ideology" for its unscientific and utopian nature.[133] Gebru and Émile P. Torres have suggested that obsession with AGI is part of a pattern of intellectual movements called TESCREAL.[134]

Wired editor Kevin Kelly argues that natural intelligence is more nuanced than AGI proponents believe, and that intelligence alone is not enough to achieve major scientific and societal breakthroughs. He argues that intelligence consists of many dimensions that are not well understood, and that conceptions of an "intelligence ladder" are misleading. He notes the crucial role real-world experiments play in the scientific method, and that intelligence alone is no substitute for these.[135]

Meta chief AI scientist Yann LeCun says that AI can be made safe via continuous and iterative refinement, similar to what happened in the past with cars or rockets, and that AI will have no desire to take control.[136]

Several skeptics emphasize the potential near-term benefits of AI.
Meta CEO Mark Zuckerberg believes AI will "unlock a huge amount of positive things", such as curing disease and increasing the safety of autonomous cars.[137]

Popular reaction

During a 2016 Wired interview with President Barack Obama and MIT Media Lab's Joi Ito, Ito said:

There are a few people who believe that there is a fairly high-percentage chance that a generalized AI will happen in the next 10 years. But the way I look at it is that in order for that to happen, we're going to need a dozen or two different breakthroughs. So you can monitor when you think these breakthroughs will happen.

Obama added:[138][139]

And you just have to have somebody close to the power cord. [Laughs.] Right when you see it about to happen, you gotta yank that electricity out of the wall, man.

Hillary Clinton wrote in What Happened:

Technologists... have warned that artificial intelligence could one day pose an existential security threat. Musk has called it "the greatest risk we face as a civilization". Think about it: Have you ever seen a movie where the machines start thinking for themselves that ends well? Every time I went out to Silicon Valley during the campaign, I came home more alarmed about this. My staff lived in fear that I'd start talking about "the rise of the robots" in some Iowa town hall. Maybe I should have.
In any case, policy makers need to keep up with technology as it races ahead, instead of always playing catch-up.[140]

Public surveys

In 2018, a SurveyMonkey poll of the American public by USA Today found 68% thought the real current threat remains "human intelligence", but also found that 43% said superintelligent AI, if it were to happen, would result in "more harm than good", and that 38% said it would do "equal amounts of harm and good".[141]

An April 2023 YouGov poll of US adults found 46% of respondents were "somewhat concerned" or "very concerned" about "the possibility that AI will cause the end of the human race on Earth", compared with 40% who were "not very concerned" or "not at all concerned."[142]

According to an August 2023 survey by the Pew Research Center, 52% of Americans felt more concerned than excited about new AI developments, while nearly a third felt equally concerned and excited. In several areas, from healthcare and vehicle safety to product search and customer service, more Americans expected AI to have a helpful rather than a hurtful impact. The main exception was privacy: 53% of Americans believed AI would lead to greater exposure of their personal information.[143]

Mitigation

See also: AI alignment, Machine ethics, Friendly artificial intelligence, and Regulation of artificial intelligence

Many scholars concerned about AGI existential risk believe that extensive research into the "control problem" is essential.
This problem involves determining which safeguards, algorithms, or architectures can be implemented to increase the likelihood that a recursively improving AI remains friendly after achieving superintelligence.<sup id="cite_ref-superintelligence_5-16" class="reference"><a href="#cite_note-superintelligence-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-physica_scripta_146-0" class="reference"><a href="#cite_note-physica_scripta-146"><span class="cite-bracket">&#91;</span>144<span class="cite-bracket">&#93;</span></a></sup> Social measures have also been proposed to mitigate AGI risks,<sup id="cite_ref-147" class="reference"><a href="#cite_note-147"><span class="cite-bracket">&#91;</span>145<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-148" class="reference"><a href="#cite_note-148"><span class="cite-bracket">&#91;</span>146<span class="cite-bracket">&#93;</span></a></sup> such as a UN-sponsored "Benevolent AGI Treaty" to ensure that only altruistic AGIs are created.<sup id="cite_ref-149" class="reference"><a href="#cite_note-149"><span class="cite-bracket">&#91;</span>147<span class="cite-bracket">&#93;</span></a></sup> Additionally, an arms control approach and a global peace treaty grounded in <a href="/wiki/International_relations_theory" title="International relations theory">international relations theory</a> have been suggested, potentially with an artificial superintelligence as a signatory.<sup id="cite_ref-150" class="reference"><a href="#cite_note-150"><span class="cite-bracket">&#91;</span>148<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-151" class="reference"><a href="#cite_note-151"><span class="cite-bracket">&#91;</span>149<span class="cite-bracket">&#93;</span></a></sup> </p><p>Researchers at Google have proposed research into general "AI safety" issues to simultaneously mitigate both short-term risks from narrow AI and long-term risks from AGI.<sup id="cite_ref-152" class="reference"><a href="#cite_note-152"><span class="cite-bracket">&#91;</span>150<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-153" class="reference"><a href="#cite_note-153"><span class="cite-bracket">&#91;</span>151<span class="cite-bracket">&#93;</span></a></sup> A 2020 estimate put global spending on AI existential risk at somewhere between $10 million and $50 million, compared with perhaps $40 billion of global spending on AI overall. Bostrom suggests prioritizing funding for protective technologies over potentially dangerous ones.<sup id="cite_ref-:5_80-1" class="reference"><a href="#cite_note-:5-80"><span class="cite-bracket">&#91;</span>79<span class="cite-bracket">&#93;</span></a></sup> Some, like Elon Musk, advocate radical <a href="/wiki/Human_enhancement" title="Human enhancement">human cognitive enhancement</a>, such as direct neural linking between humans and machines; others argue that these technologies may pose an existential risk themselves.<sup id="cite_ref-154" class="reference"><a href="#cite_note-154"><span class="cite-bracket">&#91;</span>152<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-155" class="reference"><a href="#cite_note-155"><span class="cite-bracket">&#91;</span>153<span class="cite-bracket">&#93;</span></a></sup> Another proposed method is closely monitoring or "boxing in" an early-stage AI to prevent it from becoming too powerful. 
A dominant, aligned superintelligent AI might also mitigate risks from rival AIs, although its creation could present its own existential dangers.<sup id="cite_ref-156" class="reference"><a href="#cite_note-156"><span class="cite-bracket">&#91;</span>154<span class="cite-bracket">&#93;</span></a></sup> Induced <a href="/wiki/Amnesia" title="Amnesia">amnesia</a> has been proposed as a way to mitigate the risks of potential AI suffering and revenge-seeking.<sup id="cite_ref-aiamnesia_157-0" class="reference"><a href="#cite_note-aiamnesia-157"><span class="cite-bracket">&#91;</span>155<span class="cite-bracket">&#93;</span></a></sup> </p><p>Institutions such as the <a href="/wiki/Alignment_Research_Center" title="Alignment Research Center">Alignment Research Center</a>,<sup id="cite_ref-158" class="reference"><a href="#cite_note-158"><span class="cite-bracket">&#91;</span>156<span class="cite-bracket">&#93;</span></a></sup> the <a href="/wiki/Machine_Intelligence_Research_Institute" title="Machine Intelligence Research Institute">Machine Intelligence Research Institute</a>,<sup id="cite_ref-159" class="reference"><a href="#cite_note-159"><span class="cite-bracket">&#91;</span>157<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-160" class="reference"><a href="#cite_note-160"><span class="cite-bracket">&#91;</span>158<span class="cite-bracket">&#93;</span></a></sup> the <a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a>, the <a href="/wiki/Centre_for_the_Study_of_Existential_Risk" title="Centre for the Study of Existential Risk">Centre for the Study of Existential Risk</a>, and the <a href="/wiki/Center_for_Human-Compatible_AI" class="mw-redirect" title="Center for Human-Compatible AI">Center for Human-Compatible AI</a><sup id="cite_ref-161" class="reference"><a href="#cite_note-161"><span class="cite-bracket">&#91;</span>159<span class="cite-bracket">&#93;</span></a></sup> are actively engaged in researching AI risk and safety. </p> <div class="mw-heading mw-heading3"><h3 id="Views_on_banning_and_regulation">Views on banning and regulation</h3></div> <div class="mw-heading mw-heading4"><h4 id="Banning">Banning</h4></div> <p>Some scholars have said that even if AGI poses an existential risk, attempting to ban research into artificial intelligence is still unwise, and probably futile.<sup id="cite_ref-162" class="reference"><a href="#cite_note-162"><span class="cite-bracket">&#91;</span>160<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-163" class="reference"><a href="#cite_note-163"><span class="cite-bracket">&#91;</span>161<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-164" class="reference"><a href="#cite_note-164"><span class="cite-bracket">&#91;</span>162<span class="cite-bracket">&#93;</span></a></sup> Skeptics who believe AGI poses no existential risk regard regulation as pointless. 
But scholars who believe in the risk argue that relying on AI industry insiders to regulate or constrain AI research is impractical due to conflicts of interest.<sup id="cite_ref-:7_165-0" class="reference"><a href="#cite_note-:7-165"><span class="cite-bracket">&#91;</span>163<span class="cite-bracket">&#93;</span></a></sup> They also agree with skeptics that banning research would be unwise, as research could be moved to countries with looser regulations or conducted covertly.<sup id="cite_ref-:7_165-1" class="reference"><a href="#cite_note-:7-165"><span class="cite-bracket">&#91;</span>163<span class="cite-bracket">&#93;</span></a></sup> Additional challenges to bans or regulation include technology entrepreneurs' general skepticism of government regulation and potential incentives for businesses to resist regulation and <a href="/wiki/Politicization_of_science" title="Politicization of science">politicize</a> the debate.<sup id="cite_ref-166" class="reference"><a href="#cite_note-166"><span class="cite-bracket">&#91;</span>164<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Regulation">Regulation</h4></div> <div role="note" class="hatnote navigation-not-searchable">See also: <a href="/wiki/Regulation_of_algorithms" title="Regulation of algorithms">Regulation of algorithms</a> and <a href="/wiki/Regulation_of_artificial_intelligence" title="Regulation of artificial intelligence">Regulation of artificial intelligence</a></div> <p>In March 2023, the <a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a> drafted <i><a href="/wiki/Pause_Giant_AI_Experiments:_An_Open_Letter" title="Pause Giant AI Experiments: An Open Letter">Pause Giant AI Experiments: An Open Letter</a></i>, a petition calling on major AI developers to agree on a verifiable six-month pause of any systems "more powerful than <a href="/wiki/GPT-4" title="GPT-4">GPT-4</a>" and to use that time to institute a framework for ensuring safety; or, failing that, for governments to step in with a moratorium. 
The letter referred to the possibility of "a profound change in the history of life on Earth" as well as potential risks of AI-generated propaganda, loss of jobs, human obsolescence, and society-wide loss of control.<sup id="cite_ref-:9_112-1" class="reference"><a href="#cite_note-:9-112"><span class="cite-bracket">&#91;</span>110<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-167" class="reference"><a href="#cite_note-167"><span class="cite-bracket">&#91;</span>165<span class="cite-bracket">&#93;</span></a></sup> The letter was signed by prominent personalities in AI but also criticized for not focusing on current harms,<sup id="cite_ref-168" class="reference"><a href="#cite_note-168"><span class="cite-bracket">&#91;</span>166<span class="cite-bracket">&#93;</span></a></sup> missing technical nuance about when to pause,<sup id="cite_ref-169" class="reference"><a href="#cite_note-169"><span class="cite-bracket">&#91;</span>167<span class="cite-bracket">&#93;</span></a></sup> or not going far enough.<sup id="cite_ref-170" class="reference"><a href="#cite_note-170"><span class="cite-bracket">&#91;</span>168<span class="cite-bracket">&#93;</span></a></sup> </p><p>Musk called for some sort of regulation of AI development as early as 2017. According to <a href="/wiki/National_Public_Radio" class="mw-redirect" title="National Public Radio">NPR</a>, he is "clearly not thrilled" to be advocating government scrutiny that could impact his own industry, but believes the risks of going completely without oversight are too high: "Normally the way regulations are set up is when a bunch of bad things happen, there's a public outcry, and after many years a regulatory agency is set up to regulate that industry. It takes forever. That, in the past, has been bad but not something which represented a fundamental risk to the existence of civilisation." Musk states the first step would be for the government to gain "insight" into the actual status of current research, warning that "Once there is awareness, people will be extremely afraid... [as] they should be." 
In response, politicians expressed skepticism about the wisdom of regulating a technology that is still in development.<sup id="cite_ref-171" class="reference"><a href="#cite_note-171"><span class="cite-bracket">&#91;</span>169<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-172" class="reference"><a href="#cite_note-172"><span class="cite-bracket">&#91;</span>170<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-cnbc2_173-0" class="reference"><a href="#cite_note-cnbc2-173"><span class="cite-bracket">&#91;</span>171<span class="cite-bracket">&#93;</span></a></sup> </p><p>In 2021 the <a href="/wiki/United_Nations" title="United Nations">United Nations</a> (UN) considered banning autonomous lethal weapons, but consensus could not be reached.<sup id="cite_ref-174" class="reference"><a href="#cite_note-174"><span class="cite-bracket">&#91;</span>172<span class="cite-bracket">&#93;</span></a></sup> In July 2023 the UN <a href="/wiki/United_Nations_Security_Council" title="United Nations Security Council">Security Council</a> for the first time held a session to consider the risks and threats posed by AI to world peace and stability, along with potential benefits.<sup id="cite_ref-:13_175-0" class="reference"><a href="#cite_note-:13-175"><span class="cite-bracket">&#91;</span>173<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-176" class="reference"><a href="#cite_note-176"><span class="cite-bracket">&#91;</span>174<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Secretary-General_of_the_United_Nations" title="Secretary-General of the United Nations">Secretary-General</a> <a href="/wiki/Ant%C3%B3nio_Guterres" title="António Guterres">António Guterres</a> advocated the creation of a global watchdog to oversee the emerging technology, saying, "Generative AI has enormous potential for good and evil at scale. Its creators themselves have warned that much bigger, potentially catastrophic and existential risks lie ahead."<sup id="cite_ref-:12_19-1" class="reference"><a href="#cite_note-:12-19"><span class="cite-bracket">&#91;</span>18<span class="cite-bracket">&#93;</span></a></sup> At the council session, Russia said it believes AI risks are too poorly understood to be considered a threat to global stability. 
China argued against strict global regulation, saying countries should be able to develop their own rules, while also saying they opposed the use of AI to "create military hegemony or undermine the sovereignty of a country".<sup id="cite_ref-:13_175-1" class="reference"><a href="#cite_note-:13-175"><span class="cite-bracket">&#91;</span>173<span class="cite-bracket">&#93;</span></a></sup> </p><p>Regulation of conscious AGIs focuses on integrating them with existing human society and can be divided into considerations of their legal standing and of their moral rights.<sup id="cite_ref-:532_177-0" class="reference"><a href="#cite_note-:532-177"><span class="cite-bracket">&#91;</span>175<span class="cite-bracket">&#93;</span></a></sup> AI arms control will likely require the institutionalization of new international norms embodied in effective technical specifications combined with active monitoring and informal diplomacy by communities of experts, together with a legal and political verification process.<sup id="cite_ref-178" class="reference"><a href="#cite_note-178"><span class="cite-bracket">&#91;</span>176<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-:132_118-1" class="reference"><a href="#cite_note-:132-118"><span class="cite-bracket">&#91;</span>116<span class="cite-bracket">&#93;</span></a></sup> </p><p>In July 2023, the US government secured voluntary safety commitments from major tech companies, including <a href="/wiki/OpenAI" title="OpenAI">OpenAI</a>, <a href="/wiki/Amazon_(company)" title="Amazon (company)">Amazon</a>, <a href="/wiki/Google" title="Google">Google</a>, <a href="/wiki/Meta_Platforms" title="Meta Platforms">Meta</a>, and <a href="/wiki/Microsoft" title="Microsoft">Microsoft</a>. The companies agreed to implement safeguards, including third-party oversight and security testing by independent experts, to address concerns related to AI's potential risks and societal harms. The parties framed the commitments as an intermediate step while regulations are formed. Amba Kak, executive director of the <a href="/wiki/AI_Now_Institute" title="AI Now Institute">AI Now Institute</a>, said, "A closed-door deliberation with corporate actors resulting in voluntary safeguards isn't enough" and called for public deliberation and regulations of the kind to which companies would not voluntarily agree.<sup id="cite_ref-179" class="reference"><a href="#cite_note-179"><span class="cite-bracket">&#91;</span>177<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-180" class="reference"><a href="#cite_note-180"><span class="cite-bracket">&#91;</span>178<span class="cite-bracket">&#93;</span></a></sup> </p><p>In October 2023, U.S. President <a href="/wiki/Joe_Biden" title="Joe Biden">Joe Biden</a> issued an executive order on the "<a href="/wiki/Executive_Order_14110" title="Executive Order 14110">Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence</a>".<sup id="cite_ref-181" class="reference"><a href="#cite_note-181"><span class="cite-bracket">&#91;</span>179<span class="cite-bracket">&#93;</span></a></sup> Alongside other requirements, the order mandates the development of guidelines for AI models that permit the "evasion of human control". 
</p> <div class="mw-heading mw-heading2"><h2 id="See_also">See also</h2></div> <div class="div-col" style="column-width: 20em;"> <ul><li><a href="/wiki/Appeal_to_probability" title="Appeal to probability">Appeal to probability</a></li> <li><a href="/wiki/AI_alignment" title="AI alignment">AI alignment</a></li> <li><a href="/wiki/AI_safety" title="AI safety">AI safety</a></li> <li><a href="/wiki/Butlerian_Jihad" class="mw-redirect" title="Butlerian Jihad">Butlerian Jihad</a></li> <li><a href="/wiki/Effective_altruism#Long-term_future_and_global_catastrophic_risks" title="Effective altruism">Effective altruism § Long-term future and global catastrophic risks</a></li> <li><a href="/wiki/Gray_goo" title="Gray goo">Gray goo</a></li> <li><i><a href="/wiki/Human_Compatible" title="Human Compatible">Human Compatible</a></i></li> <li><a href="/wiki/Lethal_autonomous_weapon" title="Lethal autonomous weapon">Lethal autonomous weapon</a></li> <li><a href="/wiki/Instrumental_convergence#Paperclip_maximizer" title="Instrumental convergence">Paperclip maximizer</a></li> <li><a href="/wiki/Philosophy_of_artificial_intelligence" title="Philosophy of artificial intelligence">Philosophy of artificial intelligence</a></li> <li><a href="/wiki/Robot_ethics#In_popular_culture" title="Robot ethics">Robot ethics § In popular culture</a></li> <li><a href="/wiki/Statement_on_AI_risk_of_extinction" title="Statement on AI risk of extinction">Statement on AI risk of extinction</a></li> <li><i><a href="/wiki/Superintelligence:_Paths,_Dangers,_Strategies" title="Superintelligence: Paths, Dangers, Strategies">Superintelligence: Paths, Dangers, Strategies</a></i></li> <li><a href="/wiki/Risk_of_astronomical_suffering" title="Risk of astronomical suffering">Risk of astronomical suffering</a></li> <li><a href="/wiki/System_accident" title="System accident">System accident</a></li> <li><a href="/wiki/Technological_singularity" title="Technological singularity">Technological singularity</a></li></ul> </div> <div class="mw-heading mw-heading2"><h2 id="Notes">Notes</h2></div> <div class="reflist reflist-lower-alpha"> <div class="mw-references-wrap"><ol class="references"> <li id="cite_note-turing_note-12">^ In a 1951 lecture<sup id="cite_ref-10" class="reference"><a href="#cite_note-10"><span class="cite-bracket">&#91;</span>10<span class="cite-bracket">&#93;</span></a></sup> Turing argued that "It seems probable that once the machine thinking method had started, it would not take long to outstrip our feeble powers. There would be no question of the machines dying, and they would be able to converse with each other to sharpen their wits. At some stage therefore we should have to expect the machines to take control, in the way that is mentioned in Samuel Butler's Erewhon". Also in a lecture broadcast on the <a href="/wiki/BBC" title="BBC">BBC</a><sup id="cite_ref-11" class="reference"><a href="#cite_note-11"><span class="cite-bracket">&#91;</span>11<span class="cite-bracket">&#93;</span></a></sup> he expressed the opinion: "If a machine can think, it might think more intelligently than we do, and then where should we be? Even if we could keep the machines in a subservient position, for instance by turning off the power at strategic moments, we should, as a species, feel greatly humbled... This new danger... 
is certainly something which can give us anxiety."</li> <li id="cite_note-101">^ as interpreted by <a href="/wiki/Seth_Baum" title="Seth Baum">Seth Baum</a></li> </ol></div></div> <div class="mw-heading mw-heading2"><h2 id="References">References</h2></div> <div class="reflist"> <div class="mw-references-wrap mw-references-columns"><ol class="references"> 
<li id="cite_note-aima-1">^ Russell, Stuart; Norvig, Peter (2009). "26.3: The Ethics and Risks of Developing Artificial Intelligence". <i>Artificial Intelligence: A Modern Approach</i>. Prentice Hall. ISBN 978-0-13-604259-4.</li> 
<li id="cite_note-2">^ Bostrom, Nick (2002). "Existential risks". <i>Journal of Evolution and Technology</i>. 9 (1): 1–31.</li> 
<li id="cite_note-auto1-3">^ Turchin, Alexey; Denkenberger, David (3 May 2018). <a href="https://philarchive.org/rec/TURCOG-2">"Classification of global catastrophic risks connected with artificial intelligence"</a>. <i>AI &amp; Society</i>. 35 (1): 147–163. doi:10.1007/s00146-018-0845-5. ISSN 0951-5666. S2CID 19208453.</li> 
<li id="cite_note-4">^ Bales, Adam; D'Alessandro, William; Kirk-Giannini, Cameron Domenico (2024). <a href="https://doi.org/10.1111%2Fphc3.12964">"Artificial Intelligence: Arguments for Catastrophic Risk"</a>. <i>Philosophy Compass</i>. 19 (2). arXiv:2401.15487. doi:10.1111/phc3.12964.</li> 
<li id="cite_note-superintelligence-5">^ Bostrom, Nick (2014). <i>Superintelligence: Paths, Dangers, Strategies</i> (First ed.). Oxford University Press. ISBN 978-0-19-967811-2.</li> 
<li id="cite_note-6">^ Vynck, Gerrit De (23 May 2023). <a href="https://www.washingtonpost.com/technology/2023/05/20/ai-existential-risk-debate/">"The debate over whether AI will destroy us is dividing Silicon Valley"</a>. <i>Washington Post</i>. ISSN 0190-8286. Retrieved 27 July 2023.</li> 
<li id="cite_note-7">^ Metz, Cade (10 June 2023). <a href="https://www.nytimes.com/2023/06/10/technology/ai-humanity.html">"How Could A.I. Destroy Humanity?"</a>. <i>The New York Times</i>. ISSN 0362-4331. Retrieved 27 July 2023.</li> 
<li id="cite_note-8">^ <a href="https://www.cbsnews.com/news/godfather-of-artificial-intelligence-weighs-in-on-the-past-and-potential-of-artificial-intelligence/">"'Godfather of artificial intelligence' weighs in on the past and potential of AI"</a>. <i>www.cbsnews.com</i>. 25 March 2023. Retrieved 10 April 2023.</li> 
<li id="cite_note-9">^ <a href="https://yoshuabengio.org/2023/05/22/how-rogue-ais-may-arise/">"How Rogue AIs may Arise"</a>. <i>yoshuabengio.org</i>. 26 May 2023. Retrieved 26 May 2023.</li> 
<li id="cite_note-10">^ Turing, Alan (1951). <a href="https://turingarchive.kings.cam.ac.uk/publications-lectures-and-talks-amtb/amt-b-4"><i>Intelligent machinery, a heretical theory</i></a> (Speech). Lecture given to '51 Society'. Manchester: The Turing Digital Archive. <a href="https://web.archive.org/web/20220926004549/https://turingarchive.kings.cam.ac.uk/publications-lectures-and-talks-amtb/amt-b-4">Archived</a> from the original on 26 September 2022. Retrieved 22 July 2022.</li> 
<li id="cite_note-11">^ Turing, Alan (15 May 1951). "Can digital computers think?". <i>Automatic Calculating Machines</i>. Episode 2. BBC. <a href="https://turingarchive.kings.cam.ac.uk/publications-lectures-and-talks-amtb/amt-b-6">Can digital computers think?</a>.</li> 
<li id="cite_note-Parkin-13">^ Parkin, Simon (14 June 2015). <a href="https://www.theguardian.com/tv-and-radio/2015/jun/14/science-fiction-no-more-humans-tv-artificial-intelligence">"Science fiction no more? Channel 4's Humans and our rogue AI obsessions"</a>. <i>The Guardian</i>. <a href="https://web.archive.org/web/20180205184322/https://www.theguardian.com/tv-and-radio/2015/jun/14/science-fiction-no-more-humans-tv-artificial-intelligence">Archived</a> from the original on 5 February 2018. Retrieved 5 February 2018.</li> 
<li id="cite_note-Jackson-14">^ Jackson, Sarah. <a href="https://www.businessinsider.com/chatgpt-openai-ceo-worst-case-ai-lights-out-for-all-2023-1">"The CEO of the company behind AI chatbot ChatGPT says the worst-case scenario for artificial intelligence is 'lights out for all of us'"</a>. <i>Business Insider</i>. Retrieved 10 April 2023.</li> 
<li id="cite_note-15">^ <a href="https://www.humanetech.com/podcast/the-ai-dilemma">"The AI Dilemma"</a>. <i>www.humanetech.com</i>. Retrieved 10 April 2023. <q>50% of AI researchers believe there's a 10% or greater chance that humans go extinct from our inability to control AI.</q></li> 
<li id="cite_note-:8-16">^ <a href="https://aiimpacts.org/2022-expert-survey-on-progress-in-ai/">"2022 Expert Survey on Progress in AI"</a>. <i>AI Impacts</i>. 4 August 2022. Retrieved 10 April 2023.</li> 
<li id="cite_note-17">^ Roose, Kevin (30 May 2023). <a href="https://www.nytimes.com/2023/05/30/technology/ai-threat-warning.html">"A.I. Poses 'Risk of Extinction,' Industry Leaders Warn"</a>. <i>The New York Times</i>. ISSN 0362-4331. Retrieved 3 June 2023.</li> 
<li id="cite_note-18">^ Sunak, Rishi (14 June 2023). <a href="https://time.com/6287253/uk-rishi-sunak-ai-regulation/">"Rishi Sunak Wants the U.K. to Be a Key Player in Global AI Regulation"</a>. <i>Time</i>.</li> 
<li id="cite_note-:12-19">^ Fung, Brian (18 July 2023). <a href="https://www.cnn.com/2023/07/18/tech/un-ai-agency/index.html">"UN Secretary General embraces calls for a new UN agency on AI in the face of 'potentially catastrophic and existential risks'"</a>. <i>CNN Business</i>. Retrieved 20 July 2023.</li> 
<li id="cite_note-yudkowsky-global-risk-20">^ Yudkowsky, Eliezer (2008). <a href="https://intelligence.org/files/AIPosNegFactor.pdf">"Artificial Intelligence as a Positive and Negative Factor in Global Risk"</a> (PDF). <i>Global Catastrophic Risks</i>: 308–345. Bibcode:2008gcr..book..303Y. <a href="https://web.archive.org/web/20130302173022/http://intelligence.org/files/AIPosNegFactor.pdf">Archived</a> (PDF) from the original on 2 March 2013. Retrieved 27 August 2018.</li> 
<li id="cite_note-research-priorities-21">^ Russell, Stuart; Dewey, Daniel; Tegmark, Max (2015). <a href="https://futureoflife.org/data/documents/research_priorities.pdf">"Research Priorities for Robust and Beneficial Artificial Intelligence"</a> (PDF). <i>AI Magazine</i>. Association for the Advancement of Artificial Intelligence: 105–114. arXiv:1602.03506. Bibcode:2016arXiv160203506R. <a href="https://web.archive.org/web/20190804145930/https://futureoflife.org/data/documents/research_priorities.pdf">Archived</a> (PDF) from the original on 4 August 2019. Retrieved 10 August 2019. Cited in <a href="https://futureoflife.org/ai-open-letter">"AI Open Letter - Future of Life Institute"</a>. <i>Future of Life Institute</i>. January 2015. <a href="https://web.archive.org/web/20190810020404/https://futureoflife.org/ai-open-letter">Archived</a> from the original on 10 August 2019. Retrieved 9 August 2019.</li> 
<li id="cite_note-vanity-22">^ Dowd, Maureen (April 2017). <a href="https://www.vanityfair.com/news/2017/03/elon-musk-billion-dollar-crusade-to-stop-ai-space-x">"Elon Musk's Billion-Dollar Crusade to Stop the A.I. Apocalypse"</a>. <i>The Hive</i>. <a href="https://web.archive.org/web/20180726041656/https://www.vanityfair.com/news/2017/03/elon-musk-billion-dollar-crusade-to-stop-ai-space-x">Archived</a> from the original on 26 July 2018. Retrieved 27 November 2017.</li> 
<li id="cite_note-23">^ <a href="https://www.deepmind.com/blog/alphago-zero-starting-from-scratch">"AlphaGo Zero: Starting from scratch"</a>. <i>www.deepmind.com</i>. 18 October 2017. Retrieved 28 July 2023.</li> 
<li id="cite_note-24">^ Breuer, Hans-Peter. <a href="https://www.jstor.org/pss/436868">'Samuel Butler's "the Book of the Machines" and the Argument from Design.'</a> <i>Modern Philology</i>, Vol. 72, No. 4 (May 1975), pp. 365–383. <a href="https://web.archive.org/web/20230315233257/https://www.jstor.org/stable/436868">Archived</a> 15 March 2023 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>.</li> 
<li id="cite_note-oxfordjournals-25">^ Turing, A M (1996). <a href="https://doi.org/10.1093%2Fphilmat%2F4.3.256">"Intelligent Machinery, A Heretical Theory"</a>. <i>Philosophia Mathematica</i> (1951, reprinted). 4 (3): 256–260. doi:10.1093/philmat/4.3.256.</li> 
<li id="cite_note-26">^ Hilliard, Mark (2017). <a href="https://www.irishtimes.com/business/innovation/the-ai-apocalypse-will-the-human-race-soon-be-terminated-1.3019220">"The AI apocalypse: will the human race soon be terminated?"</a>. <i>The Irish Times</i>. <a href="https://web.archive.org/web/20200522114127/https://www.irishtimes.com/business/innovation/the-ai-apocalypse-will-the-human-race-soon-be-terminated-1.3019220">Archived</a> from the original on 22 May 2020. Retrieved 15 March 2020.</li> 
<li id="cite_note-27">^ I.J. Good, <a href="http://commonsenseatheism.com/wp-content/uploads/2011/02/Good-Speculations-Concerning-the-First-Ultraintelligent-Machine.pdf">"Speculations Concerning the First Ultraintelligent Machine"</a> (<a href="https://web.archive.org/web/20111128085512/http://commonsenseatheism.com/wp-content/uploads/2011/02/Good-Speculations-Concerning-the-First-Ultraintelligent-Machine.pdf">Archived</a> 28 November 2011 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>; <a href="http://www.acceleratingfuture.com/pages/ultraintelligentmachine.html">HTML</a>), <i>Advances in Computers</i>, vol. 6, 1965.</li> 
<li id="cite_note-28">^ Russell, Stuart J.; Norvig, Peter (2003). "Section 26.3: The Ethics and Risks of Developing Artificial Intelligence". <i>Artificial Intelligence: A Modern Approach</i>. Upper Saddle River, New Jersey: Prentice Hall. ISBN 978-0-13-790395-5. <q>Similarly, Marvin Minsky once suggested that an AI program designed to solve the Riemann Hypothesis might end up taking over all the resources of Earth to build more powerful supercomputers to help achieve its goal.</q></li> 
<li id="cite_note-29">^ <span class="reference-text"><cite id="CITEREFBarrat2013" class="citation book cs1">Barrat, James (2013). <i>Our final invention: artificial intelligence and the end of the human era</i> (First ed.). New York: St. Martin's Press. ISBN 978-0-312-62237-4. <q>In the bio, playfully written in the third person, Good summarized his life's milestones, including a probably never before seen account of his work at Bletchley Park with Turing. But here's what he wrote in 1998 about the first superintelligence, and his late-in-the-game U-turn: [The paper] 'Speculations Concerning the First Ultra-intelligent Machine' (1965)...began: 'The survival of man depends on the early construction of an ultra-intelligent machine.' Those were his [Good's] words during the Cold War, and he now suspects that 'survival' should be replaced by 'extinction.' He thinks that, because of international competition, we cannot prevent the machines from taking over. He thinks we are lemmings. 
He said also that 'probably Man will construct the deus ex machina in his own image.'<span class="cs1-kern-right"></span></q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Our+final+invention%3A+artificial+intelligence+and+the+end+of+the+human+era&amp;rft.place=New+York&amp;rft.edition=First&amp;rft.pub=St.+Martin%27s+Press&amp;rft.date=2013&amp;rft.isbn=978-0-312-62237-4&amp;rft.aulast=Barrat&amp;rft.aufirst=James&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-30">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAnderson2014" class="citation news cs1">Anderson, Kurt (26 November 2014). <a rel="nofollow" class="external text" href="https://www.vanityfair.com/news/tech/2014/11/artificial-intelligence-singularity-theory">"Enthusiasts and Skeptics Debate Artificial Intelligence"</a>. <i><a href="/wiki/Vanity_Fair_(magazine)" title="Vanity Fair (magazine)">Vanity Fair</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160122025154/http://www.vanityfair.com/news/tech/2014/11/artificial-intelligence-singularity-theory">Archived</a> from the original on 22 January 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">30 January</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Vanity+Fair&amp;rft.atitle=Enthusiasts+and+Skeptics+Debate+Artificial+Intelligence&amp;rft.date=2014-11-26&amp;rft.aulast=Anderson&amp;rft.aufirst=Kurt&amp;rft_id=https%3A%2F%2Fwww.vanityfair.com%2Fnews%2Ftech%2F2014%2F11%2Fartificial-intelligence-singularity-theory&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-31"><span class="mw-cite-backlink"><b><a href="#cite_ref-31">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMetz2018" class="citation news cs1">Metz, Cade (9 June 2018). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2018/06/09/technology/elon-musk-mark-zuckerberg-artificial-intelligence.html">"Mark Zuckerberg, Elon Musk and the Feud Over Killer Robots"</a>. <i>The New York Times</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210215051949/https://www.nytimes.com/2018/06/09/technology/elon-musk-mark-zuckerberg-artificial-intelligence.html">Archived</a> from the original on 15 February 2021<span class="reference-accessdate">. 
Retrieved <span class="nowrap">3 April</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+New+York+Times&amp;rft.atitle=Mark+Zuckerberg%2C+Elon+Musk+and+the+Feud+Over+Killer+Robots&amp;rft.date=2018-06-09&amp;rft.aulast=Metz&amp;rft.aufirst=Cade&amp;rft_id=https%3A%2F%2Fwww.nytimes.com%2F2018%2F06%2F09%2Ftechnology%2Felon-musk-mark-zuckerberg-artificial-intelligence.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHsu2012" class="citation news cs1">Hsu, Jeremy (1 March 2012). <a rel="nofollow" class="external text" href="https://www.nbcnews.com/id/wbna46590591">"Control dangerous AI before it controls us, one expert says"</a>. <i><a href="/wiki/NBC_News" title="NBC News">NBC News</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160202173621/http://www.nbcnews.com/id/46590591/ns/technology_and_science-innovation">Archived</a> from the original on 2 February 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">28 January</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=NBC+News&amp;rft.atitle=Control+dangerous+AI+before+it+controls+us%2C+one+expert+says&amp;rft.date=2012-03-01&amp;rft.aulast=Hsu&amp;rft.aufirst=Jeremy&amp;rft_id=https%3A%2F%2Fwww.nbcnews.com%2Fid%2Fwbna46590591&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-hawking_editorial-33"><span class="mw-cite-backlink">^ <a href="#cite_ref-hawking_editorial_33-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-hawking_editorial_33-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-hawking_editorial_33-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.independent.co.uk/news/science/stephen-hawking-transcendence-looks-at-the-implications-of-artificial-intelligence--but-are-we-taking-ai-seriously-enough-9313474.html">"Stephen Hawking: 'Transcendence looks at the implications of artificial intelligence&#160;– but are we taking AI seriously enough?'<span class="cs1-kern-right"></span>"</a>. <a href="/wiki/The_Independent_(UK)" class="mw-redirect" title="The Independent (UK)">The Independent (UK)</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150925153716/http://www.independent.co.uk/news/science/stephen-hawking-transcendence-looks-at-the-implications-of-artificial-intelligence--but-are-we-taking-ai-seriously-enough-9313474.html">Archived</a> from the original on 25 September 2015<span class="reference-accessdate">. 
Retrieved <span class="nowrap">3 December</span> 2014</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.atitle=Stephen+Hawking%3A+%27Transcendence+looks+at+the+implications+of+artificial+intelligence+%E2%80%93+but+are+we+taking+AI+seriously+enough%3F%27&amp;rft_id=https%3A%2F%2Fwww.independent.co.uk%2Fnews%2Fscience%2Fstephen-hawking-transcendence-looks-at-the-implications-of-artificial-intelligence--but-are-we-taking-ai-seriously-enough-9313474.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-bbc_on_hawking_editorial-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-bbc_on_hawking_editorial_34-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.bbc.com/news/technology-30290540">"Stephen Hawking warns artificial intelligence could end mankind"</a>. <a href="/wiki/BBC" title="BBC">BBC</a>. 2 December 2014. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151030054329/http://www.bbc.com/news/technology-30290540">Archived</a> from the original on 30 October 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">3 December</span> 2014</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.atitle=Stephen+Hawking+warns+artificial+intelligence+could+end+mankind&amp;rft.date=2014-12-02&amp;rft_id=https%3A%2F%2Fwww.bbc.com%2Fnews%2Ftechnology-30290540&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-35">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEadicicco2015" class="citation news cs1">Eadicicco, Lisa (28 January 2015). <a rel="nofollow" class="external text" href="http://www.businessinsider.com/bill-gates-artificial-intelligence-2015-1">"Bill Gates: Elon Musk Is Right, We Should All Be Scared Of Artificial Intelligence Wiping Out Humanity"</a>. <i><a href="/wiki/Business_Insider" title="Business Insider">Business Insider</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160226090602/http://www.businessinsider.com/bill-gates-artificial-intelligence-2015-1">Archived</a> from the original on 26 February 2016<span class="reference-accessdate">. 
Retrieved <span class="nowrap">30 January</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Business+Insider&amp;rft.atitle=Bill+Gates%3A+Elon+Musk+Is+Right%2C+We+Should+All+Be+Scared+Of+Artificial+Intelligence+Wiping+Out+Humanity&amp;rft.date=2015-01-28&amp;rft.aulast=Eadicicco&amp;rft.aufirst=Lisa&amp;rft_id=http%3A%2F%2Fwww.businessinsider.com%2Fbill-gates-artificial-intelligence-2015-1&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-36"><span class="mw-cite-backlink"><b><a href="#cite_ref-36">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://futureoflife.org/misc/open_letter">"Research Priorities for Robust and Beneficial Artificial Intelligence: an Open Letter"</a>. <a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150115160823/http://futureoflife.org/misc/open_letter">Archived</a> from the original on 15 January 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">23 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Research+Priorities+for+Robust+and+Beneficial+Artificial+Intelligence%3A+an+Open+Letter&amp;rft.pub=Future+of+Life+Institute&amp;rft_id=http%3A%2F%2Ffutureoflife.org%2Fmisc%2Fopen_letter&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-37">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation journal cs1"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F532413a">"Anticipating artificial intelligence"</a>. <i>Nature</i>. <b>532</b> (7600): 413. 2016. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2016Natur.532Q.413.">2016Natur.532Q.413.</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F532413a">10.1038/532413a</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1476-4687">1476-4687</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/27121801">27121801</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:4399193">4399193</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=Anticipating+artificial+intelligence&amp;rft.volume=532&amp;rft.issue=7600&amp;rft.pages=413&amp;rft.date=2016&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A4399193%23id-name%3DS2CID&amp;rft_id=info%3Abibcode%2F2016Natur.532Q.413.&amp;rft.issn=1476-4687&amp;rft_id=info%3Adoi%2F10.1038%2F532413a&amp;rft_id=info%3Apmid%2F27121801&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252F532413a&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-38">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChristian2020" class="citation book cs1">Christian, Brian (6 October 2020). <a rel="nofollow" class="external text" href="https://brianchristian.org/the-alignment-problem/"><i>The Alignment Problem: Machine Learning and Human Values</i></a>. <a href="/wiki/W._W._Norton_%26_Company" title="W. W. Norton &amp; Company">W. W. Norton &amp; Company</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-393-63582-9" title="Special:BookSources/978-0-393-63582-9"><bdi>978-0-393-63582-9</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211205135022/https://brianchristian.org/the-alignment-problem/">Archived</a> from the original on 5 December 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">5 December</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+Alignment+Problem%3A+Machine+Learning+and+Human+Values&amp;rft.pub=W.+W.+Norton+%26+Company&amp;rft.date=2020-10-06&amp;rft.isbn=978-0-393-63582-9&amp;rft.aulast=Christian&amp;rft.aufirst=Brian&amp;rft_id=https%3A%2F%2Fbrianchristian.org%2Fthe-alignment-problem%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-39">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDignum2021" class="citation journal cs1">Dignum, Virginia (26 May 2021). <a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-021-01397-x">"AI – the people and places that make, use and manage it"</a>. <i>Nature</i>. <b>593</b> (7860): 499–500. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2021Natur.593..499D">2021Natur.593..499D</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-021-01397-x">10.1038/d41586-021-01397-x</a></span>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:235216649">235216649</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=AI+%E2%80%93+the+people+and+places+that+make%2C+use+and+manage+it&amp;rft.volume=593&amp;rft.issue=7860&amp;rft.pages=499-500&amp;rft.date=2021-05-26&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A235216649%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1038%2Fd41586-021-01397-x&amp;rft_id=info%3Abibcode%2F2021Natur.593..499D&amp;rft.aulast=Dignum&amp;rft.aufirst=Virginia&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252Fd41586-021-01397-x&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-40">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.bbc.com/news/technology-65110030">"Elon Musk among experts urging a halt to AI training"</a>. <i>BBC News</i>. 29 March 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">9 June</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BBC+News&amp;rft.atitle=Elon+Musk+among+experts+urging+a+halt+to+AI+training&amp;rft.date=2023-03-29&amp;rft_id=https%3A%2F%2Fwww.bbc.com%2Fnews%2Ftechnology-65110030&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-41">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.safe.ai/statement-on-ai-risk#open-letter">"Statement on AI Risk"</a>. <i>Center for AI Safety</i><span class="reference-accessdate">. Retrieved <span class="nowrap">8 June</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Center+for+AI+Safety&amp;rft.atitle=Statement+on+AI+Risk&amp;rft_id=https%3A%2F%2Fwww.safe.ai%2Fstatement-on-ai-risk%23open-letter&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-42">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.bbc.com/news/uk-65746524">"Artificial intelligence could lead to extinction, experts warn"</a>. <i>BBC News</i>. 30 May 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">8 June</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BBC+News&amp;rft.atitle=Artificial+intelligence+could+lead+to+extinction%2C+experts+warn&amp;rft.date=2023-05-30&amp;rft_id=https%3A%2F%2Fwww.bbc.com%2Fnews%2Fuk-65746524&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-43">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.economist.com/1843/2019/03/01/deepmind-and-google-the-battle-to-control-artificial-intelligence">"DeepMind and Google: the battle to control artificial intelligence"</a>. <i>The Economist</i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0013-0613">0013-0613</a><span class="reference-accessdate">. Retrieved <span class="nowrap">12 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Economist&amp;rft.atitle=DeepMind+and+Google%3A+the+battle+to+control+artificial+intelligence&amp;rft.issn=0013-0613&amp;rft_id=https%3A%2F%2Fwww.economist.com%2F1843%2F2019%2F03%2F01%2Fdeepmind-and-google-the-battle-to-control-artificial-intelligence&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-44"><span class="mw-cite-backlink"><b><a href="#cite_ref-44">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://ourworldindata.org/ai-timelines">"AI timelines: What do experts in artificial intelligence expect for the future?"</a>. <i>Our World in Data</i><span class="reference-accessdate">. Retrieved <span class="nowrap">12 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Our+World+in+Data&amp;rft.atitle=AI+timelines%3A+What+do+experts+in+artificial+intelligence+expect+for+the+future%3F&amp;rft_id=https%3A%2F%2Fourworldindata.org%2Fai-timelines&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-45">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDe_Vynck2023" class="citation news cs1">De Vynck, Gerrit (20 May 2023). <a rel="nofollow" class="external text" href="https://www.washingtonpost.com/technology/2023/05/20/ai-existential-risk-debate/">"The debate over whether AI will destroy us is dividing Silicon Valley"</a>. 
<i>The Washington Post</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Washington+Post&amp;rft.atitle=The+debate+over+whether+AI+will+destroy+us+is+dividing+Silicon+Valley&amp;rft.date=2023-05-20&amp;rft.aulast=De+Vynck&amp;rft.aufirst=Gerrit&amp;rft_id=https%3A%2F%2Fwww.washingtonpost.com%2Ftechnology%2F2023%2F05%2F20%2Fai-existential-risk-debate%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-46">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://fortune.com/2023/05/01/godfather-ai-geoffrey-hinton-quit-google-regrets-lifes-work-bad-actors/">"<span class="cs1-kern-left"></span>'The Godfather of A.I.' just quit Google and says he regrets his life's work because it can be hard to stop 'bad actors from using it for bad things'<span class="cs1-kern-right"></span>"</a>. <i>Fortune</i><span class="reference-accessdate">. Retrieved <span class="nowrap">12 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Fortune&amp;rft.atitle=%27The+Godfather+of+A.I.%27+just+quit+Google+and+says+he+regrets+his+life%27s+work+because+it+can+be+hard+to+stop+%27bad+actors+from+using+it+for+bad+things%27&amp;rft_id=https%3A%2F%2Ffortune.com%2F2023%2F05%2F01%2Fgodfather-ai-geoffrey-hinton-quit-google-regrets-lifes-work-bad-actors%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-47">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.ornl.gov/news/super-speeds-super-ai-frontier-sets-new-pace-artificial-intelligence">"Super speeds for super AI: Frontier sets new pace for artificial intelligence"</a>. <i>ORNL</i>. 14 November 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">27 September</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=ORNL&amp;rft.atitle=Super+speeds+for+super+AI%3A+Frontier+sets+new+pace+for+artificial+intelligence&amp;rft.date=2023-11-14&amp;rft_id=https%3A%2F%2Fwww.ornl.gov%2Fnews%2Fsuper-speeds-super-ai-frontier-sets-new-pace-artificial-intelligence&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-48">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.spiceworks.com/tech/artificial-intelligence/articles/everything-about-superintelligence/">"Everything you need to know about superintelligence"</a>. <i>Spiceworks</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">14 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Spiceworks&amp;rft.atitle=Everything+you+need+to+know+about+superintelligence&amp;rft_id=https%3A%2F%2Fwww.spiceworks.com%2Ftech%2Fartificial-intelligence%2Farticles%2Feverything-about-superintelligence%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-economist_review3-49"><span class="mw-cite-backlink">^ <a href="#cite_ref-economist_review3_49-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-economist_review3_49-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-economist_review3_49-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBabauta" class="citation web cs1">Babauta, Leo. <a rel="nofollow" class="external text" href="https://www.businessinsider.com/intelligent-machines-and-human-life-2014-8">"A Valuable New Book Explores The Potential Impacts Of Intelligent Machines On Human Life"</a>. <i>Business Insider</i><span class="reference-accessdate">. Retrieved <span class="nowrap">19 March</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Business+Insider&amp;rft.atitle=A+Valuable+New+Book+Explores+The+Potential+Impacts+Of+Intelligent+Machines+On+Human+Life&amp;rft.aulast=Babauta&amp;rft.aufirst=Leo&amp;rft_id=https%3A%2F%2Fwww.businessinsider.com%2Fintelligent-machines-and-human-life-2014-8&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:11-50"><span class="mw-cite-backlink">^ <a href="#cite_ref-:11_50-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:11_50-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBostrom2015" class="citation cs2">Bostrom, Nick (27 April 2015), <a rel="nofollow" class="external text" href="https://www.ted.com/talks/nick_bostrom_what_happens_when_our_computers_get_smarter_than_we_are"><i>What happens when our computers get smarter than we are?</i></a><span class="reference-accessdate">, retrieved <span class="nowrap">13 July</span> 2023</span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=What+happens+when+our+computers+get+smarter+than+we+are%3F&amp;rft.date=2015-04-27&amp;rft.aulast=Bostrom&amp;rft.aufirst=Nick&amp;rft_id=https%3A%2F%2Fwww.ted.com%2Ftalks%2Fnick_bostrom_what_happens_when_our_computers_get_smarter_than_we_are&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span>.</span> </li> <li id="cite_note-51"><span class="mw-cite-backlink"><b><a href="#cite_ref-51">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://openai.com/blog/governance-of-superintelligence">"Governance of superintelligence"</a>. <i>openai.com</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">12 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=openai.com&amp;rft.atitle=Governance+of+superintelligence&amp;rft_id=https%3A%2F%2Fopenai.com%2Fblog%2Fgovernance-of-superintelligence&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-52"><span class="mw-cite-backlink"><b><a href="#cite_ref-52">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.overcomingbias.com/2014/07/30855.html">"Overcoming Bias: I Still Don't Get Foom"</a>. <i>www.overcomingbias.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170804221136/http://www.overcomingbias.com/2014/07/30855.html">Archived</a> from the original on 4 August 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">20 September</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.overcomingbias.com&amp;rft.atitle=Overcoming+Bias%3A+I+Still+Don%27t+Get+Foom&amp;rft_id=http%3A%2F%2Fwww.overcomingbias.com%2F2014%2F07%2F30855.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-53"><span class="mw-cite-backlink"><b><a href="#cite_ref-53">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCotton-BarrattOrd2014" class="citation web cs1">Cotton-Barratt, Owen; Ord, Toby (12 August 2014). <a rel="nofollow" class="external text" href="https://www.fhi.ox.ac.uk/strategic-considerations-about-different-speeds-of-ai-takeoff/">"Strategic considerations about different speeds of AI takeoff"</a>. <i>The Future of Humanity Institute</i><span class="reference-accessdate">. Retrieved <span class="nowrap">12 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Future+of+Humanity+Institute&amp;rft.atitle=Strategic+considerations+about+different+speeds+of+AI+takeoff&amp;rft.date=2014-08-12&amp;rft.aulast=Cotton-Barratt&amp;rft.aufirst=Owen&amp;rft.au=Ord%2C+Toby&amp;rft_id=https%3A%2F%2Fwww.fhi.ox.ac.uk%2Fstrategic-considerations-about-different-speeds-of-ai-takeoff%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-54"><span class="mw-cite-backlink"><b><a href="#cite_ref-54">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTegmark2023" class="citation magazine cs1">Tegmark, Max (25 April 2023). <a rel="nofollow" class="external text" href="https://time.com/6273743/thinking-that-could-doom-us-with-ai/">"The 'Don't Look Up' Thinking That Could Doom Us With AI"</a>. <i>Time</i><span class="reference-accessdate">. Retrieved <span class="nowrap">14 July</span> 2023</span>. <q>As if losing control to Chinese minds were scarier than losing control to alien digital minds that don't care about humans. [...] 
it's clear by now that the space of possible alien minds is vastly larger than that.</q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Time&amp;rft.atitle=The+%27Don%27t+Look+Up%27+Thinking+That+Could+Doom+Us+With+AI&amp;rft.date=2023-04-25&amp;rft.aulast=Tegmark&amp;rft.aufirst=Max&amp;rft_id=https%3A%2F%2Ftime.com%2F6273743%2Fthinking-that-could-doom-us-with-ai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-55"><span class="mw-cite-backlink"><b><a href="#cite_ref-55">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://axrp.net/episode/2023/02/04/episode-19-mechanistic-interpretability-neel-nanda.html">"19 – Mechanistic Interpretability with Neel Nanda"</a>. <i>AXRP – the AI X-risk Research Podcast</i>. 4 February 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">13 July</span> 2023</span>. <q>it's plausible to me that the main thing we need to get done is noticing specific circuits to do with deception and specific dangerous capabilities like that and situational awareness and internally-represented goals.</q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=AXRP+%E2%80%93+the+AI+X-risk+Research+Podcast&amp;rft.atitle=19+%E2%80%93+Mechanistic+Interpretability+with+Neel+Nanda&amp;rft.date=2023-02-04&amp;rft_id=https%3A%2F%2Faxrp.net%2Fepisode%2F2023%2F02%2F04%2Fepisode-19-mechanistic-interpretability-neel-nanda.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-56"><span class="mw-cite-backlink"><b><a href="#cite_ref-56">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://aiimpacts.org/superintelligence-is-not-omniscience/">"Superintelligence Is Not Omniscience"</a>. <i>AI Impacts</i>. 7 April 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">16 April</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=AI+Impacts&amp;rft.atitle=Superintelligence+Is+Not+Omniscience&amp;rft.date=2023-04-07&amp;rft_id=https%3A%2F%2Faiimpacts.org%2Fsuperintelligence-is-not-omniscience%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:03-57"><span class="mw-cite-backlink">^ <a href="#cite_ref-:03_57-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:03_57-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:03_57-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-:03_57-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-:03_57-4"><sup><i><b>e</b></i></sup></a> <a href="#cite_ref-:03_57-5"><sup><i><b>f</b></i></sup></a> <a href="#cite_ref-:03_57-6"><sup><i><b>g</b></i></sup></a> <a href="#cite_ref-:03_57-7"><sup><i><b>h</b></i></sup></a> <a href="#cite_ref-:03_57-8"><sup><i><b>i</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHendrycksMazeikaWoodside,_Thomas2023" class="citation arxiv cs1">Hendrycks, Dan; Mazeika, Mantas; Woodside, Thomas (21 June 2023). "An Overview of Catastrophic AI Risks". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2306.12001">2306.12001</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CY">cs.CY</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=An+Overview+of+Catastrophic+AI+Risks&amp;rft.date=2023-06-21&amp;rft_id=info%3Aarxiv%2F2306.12001&amp;rft.aulast=Hendrycks&amp;rft.aufirst=Dan&amp;rft.au=Mazeika%2C+Mantas&amp;rft.au=Woodside%2C+Thomas&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-58"><span class="mw-cite-backlink"><b><a href="#cite_ref-58">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTaylorHern2023" class="citation news cs1">Taylor, Josh; Hern, Alex (2 May 2023). <a rel="nofollow" class="external text" href="https://www.theguardian.com/technology/2023/may/02/geoffrey-hinton-godfather-of-ai-quits-google-warns-dangers-of-machine-learning">"<span class="cs1-kern-left"></span>'Godfather of AI' Geoffrey Hinton quits Google and warns over dangers of misinformation"</a>. <i>The Guardian</i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0261-3077">0261-3077</a><span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Guardian&amp;rft.atitle=%27Godfather+of+AI%27+Geoffrey+Hinton+quits+Google+and+warns+over+dangers+of+misinformation&amp;rft.date=2023-05-02&amp;rft.issn=0261-3077&amp;rft.aulast=Taylor&amp;rft.aufirst=Josh&amp;rft.au=Hern%2C+Alex&amp;rft_id=https%3A%2F%2Fwww.theguardian.com%2Ftechnology%2F2023%2Fmay%2F02%2Fgeoffrey-hinton-godfather-of-ai-quits-google-warns-dangers-of-machine-learning&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-59"><span class="mw-cite-backlink"><b><a href="#cite_ref-59">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.euronews.com/next/2022/12/26/ai-cyber-attacks-are-a-critical-threat-this-is-how-nato-is-countering-them">"How NATO is preparing for a new era of AI cyber attacks"</a>. <i>euronews</i>. 26 December 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">13 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=euronews&amp;rft.atitle=How+NATO+is+preparing+for+a+new+era+of+AI+cyber+attacks&amp;rft.date=2022-12-26&amp;rft_id=https%3A%2F%2Fwww.euronews.com%2Fnext%2F2022%2F12%2F26%2Fai-cyber-attacks-are-a-critical-threat-this-is-how-nato-is-countering-them&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-60"><span class="mw-cite-backlink"><b><a href="#cite_ref-60">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.zdnet.com/article/chatgpt-and-the-new-ai-are-wreaking-havoc-on-cybersecurity/">"ChatGPT and the new AI are wreaking havoc on cybersecurity in exciting and frightening ways"</a>. <i>ZDNET</i><span class="reference-accessdate">. Retrieved <span class="nowrap">13 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=ZDNET&amp;rft.atitle=ChatGPT+and+the+new+AI+are+wreaking+havoc+on+cybersecurity+in+exciting+and+frightening+ways&amp;rft_id=https%3A%2F%2Fwww.zdnet.com%2Farticle%2Fchatgpt-and-the-new-ai-are-wreaking-havoc-on-cybersecurity%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-61"><span class="mw-cite-backlink"><b><a href="#cite_ref-61">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFToby_ShevlaneSebastian_FarquharBen_GarfinkelMary_Phuong2023" class="citation arxiv cs1">Toby Shevlane; Sebastian Farquhar; Ben Garfinkel; Mary Phuong; Jess Whittlestone; Jade Leung; Daniel Kokotajlo; Nahema Marchal; Markus Anderljung; Noam Kolt; Lewis Ho; Divya Siddarth; Shahar Avin; Will Hawkins; Been Kim; Iason Gabriel; Vijay Bolina; Jack Clark; Yoshua Bengio; Paul Christiano; Allan Dafoe (24 May 2023). "Model evaluation for extreme risks". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2305.15324">2305.15324</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.AI">cs.AI</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Model+evaluation+for+extreme+risks&amp;rft.date=2023-05-24&amp;rft_id=info%3Aarxiv%2F2305.15324&amp;rft.au=Toby+Shevlane&amp;rft.au=Sebastian+Farquhar&amp;rft.au=Ben+Garfinkel&amp;rft.au=Mary+Phuong&amp;rft.au=Jess+Whittlestone&amp;rft.au=Jade+Leung&amp;rft.au=Daniel+Kokotajlo&amp;rft.au=Nahema+Marchal&amp;rft.au=Markus+Anderljung&amp;rft.au=Noam+Kolt&amp;rft.au=Lewis+Ho&amp;rft.au=Divya+Siddarth&amp;rft.au=Shahar+Avin&amp;rft.au=Will+Hawkins&amp;rft.au=Been+Kim&amp;rft.au=Iason+Gabriel&amp;rft.au=Vijay+Bolina&amp;rft.au=Jack+Clark&amp;rft.au=Yoshua+Bengio&amp;rft.au=Paul+Christiano&amp;rft.au=Allan+Dafoe&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-62"><span class="mw-cite-backlink"><b><a href="#cite_ref-62">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFUrbinaLentzosInvernizziEkins2022" class="citation journal cs1">Urbina, Fabio; Lentzos, Filippa; Invernizzi, Cédric; Ekins, Sean (7 March 2022). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9544280">"Dual use of artificial-intelligence-powered drug discovery"</a>. <i>Nature Machine Intelligence</i>. <b>4</b> (3): 189–191. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs42256-022-00465-9">10.1038/s42256-022-00465-9</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2522-5839">2522-5839</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9544280">9544280</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/36211133">36211133</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature+Machine+Intelligence&amp;rft.atitle=Dual+use+of+artificial-intelligence-powered+drug+discovery&amp;rft.volume=4&amp;rft.issue=3&amp;rft.pages=189-191&amp;rft.date=2022-03-07&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC9544280%23id-name%3DPMC&amp;rft.issn=2522-5839&amp;rft_id=info%3Apmid%2F36211133&amp;rft_id=info%3Adoi%2F10.1038%2Fs42256-022-00465-9&amp;rft.aulast=Urbina&amp;rft.aufirst=Fabio&amp;rft.au=Lentzos%2C+Filippa&amp;rft.au=Invernizzi%2C+C%C3%A9dric&amp;rft.au=Ekins%2C+Sean&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC9544280&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-63"><span class="mw-cite-backlink"><b><a href="#cite_ref-63">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWalter2023" class="citation journal cs1">Walter, Yoshija (27 March 2023). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs43681-023-00276-7">"The rapid competitive economy of machine learning development: a discussion on the social risks and benefits"</a>. <i>AI and Ethics</i>. <b>4</b> (2): 1. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs43681-023-00276-7">10.1007/s43681-023-00276-7</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=AI+and+Ethics&amp;rft.atitle=The+rapid+competitive+economy+of+machine+learning+development%3A+a+discussion+on+the+social+risks+and+benefits&amp;rft.volume=4&amp;rft.issue=2&amp;rft.pages=1&amp;rft.date=2023-03-27&amp;rft_id=info%3Adoi%2F10.1007%2Fs43681-023-00276-7&amp;rft.aulast=Walter&amp;rft.aufirst=Yoshija&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252Fs43681-023-00276-7&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-64"><span class="mw-cite-backlink"><b><a href="#cite_ref-64">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation magazine cs1"><a rel="nofollow" class="external text" href="https://time.com/6255952/ai-impact-chatgpt-microsoft-google/">"The AI Arms Race Is On. Start Worrying"</a>. <i>Time</i>. 16 February 2023<span class="reference-accessdate">. 
Retrieved 17 July 2023.
Brimelow, Ben. "The short film 'Slaughterbots' depicts a dystopian future of killer drones swarming the world". Business Insider. Retrieved 20 July 2023. https://www.businessinsider.com/slaughterbots-short-film-depicts-killer-drone-swarms-2017-11
Mecklin, John (17 July 2023). "'Artificial Escalation': Imagining the future of nuclear risk". Bulletin of the Atomic Scientists. Retrieved 20 July 2023. https://thebulletin.org/2023/07/artificial-escalation-imagining-the-future-of-nuclear-risk/
Bostrom, Nick (2013). "Existential Risk Prevention as Global Priority" (PDF). Global Policy. 4 (1): 15–31. doi:10.1111/1758-5899.12002 – via Existential Risk. http://www.existential-risk.org/concept.pdf
Doherty, Ben (17 May 2018). "Climate change an 'existential security risk' to Australia, Senate inquiry says". The Guardian. ISSN 0261-3077. Retrieved 16 July 2023. https://www.theguardian.com/environment/2018/may/18/climate-change-an-existential-security-risk-to-australia-senate-inquiry-says
MacAskill, William (2022). What We Owe the Future. New York: Basic Books. ISBN 978-1-5416-1862-6.
Ord, Toby (2020). "Chapter 5: Future Risks, Unaligned Artificial Intelligence". The Precipice: Existential Risk and the Future of Humanity. Bloomsbury Publishing. ISBN 978-1-5266-0021-9.
McMillan, Tim (15 March 2024). "Navigating Humanity's Greatest Challenge Yet: Experts Debate the Existential Risks of AI". The Debrief. Retrieved 26 September 2024. https://thedebrief.org/navigating-humanitys-greatest-challenge-yet-experts-debate-the-existential-risks-of-ai/
Kasirzadeh, Atoosa (2024). "Two Types of AI Existential Risk: Decisive and Accumulative". arXiv:2401.07836 [cs.CR].
Samuelsson, Paul Conrad (June–July 2019). "Artificial Consciousness: Our Greatest Ethical Challenge". Philosophy Now. No. 132. Retrieved 19 August 2023. https://philosophynow.org/issues/132/Artificial_Consciousness_Our_Greatest_Ethical_Challenge
Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Retrieved 19 August 2023. https://time.com/6296234/ai-should-be-terrified-of-humans/
Fisher, Richard. "The intelligent monster that you should let eat you". www.bbc.com. Retrieved 19 August 2023. https://www.bbc.com/future/article/20201111-philosophy-of-utility-monsters-and-artificial-intelligence
More, Max (19 June 2023). "Existential Risk vs. Existential Opportunity: A balanced approach to AI risk". Extropic Thoughts. Retrieved 14 July 2023. https://maxmore.substack.com/p/existential-risk-vs-existential-opportunity
Omohundro, S. M. (February 2008). "The basic AI drives". In AGI (Vol. 171, pp. 483–492).
Wakefield, Jane (15 September 2015). "Why is Facebook investing in AI?". BBC News. Archived from the original on 2 December 2017. Retrieved 27 November 2017. https://www.bbc.com/news/technology-34118481
Yudkowsky, Eliezer (2011). "Complex Value Systems are Required to Realize Valuable Futures" (PDF). Archived from the original on 29 September 2015. Retrieved 10 August 2020. https://intelligence.org/files/ComplexValues.pdf
Ord, Toby (2020). The Precipice: Existential Risk and the Future of Humanity. Bloomsbury Publishing Plc. ISBN 978-1-5266-0019-6.
Yudkowsky, E. (August 2011). "Complex value systems in friendly AI". In International Conference on Artificial General Intelligence (pp. 388–393). Germany: Springer, Berlin, Heidelberg.
Russell, Stuart (2014). "Of Myths and Moonshine". Edge. Archived from the original on 19 July 2016. Retrieved 23 October 2015. http://edge.org/conversation/the-myth-of-ai#26015
Dietterich, Thomas; Horvitz, Eric (2015). "Rise of Concerns about AI: Reflections and Directions" (PDF). Communications of the ACM. 58 (10): 38–40. doi:10.1145/2770869. S2CID 20395145. Archived from the original on 4 March 2016. Retrieved 23 October 2015. http://research.microsoft.com/en-us/um/people/horvitz/CACM_Oct_2015-VP.pdf
Yudkowsky, Eliezer (29 March 2023). "The Open Letter on AI Doesn't Go Far Enough". Time. Retrieved 16 July 2023. https://time.com/6266923/ai-eliezer-yudkowsky-open-letter-not-enough/
Bostrom, Nick (1 May 2012). "The Superintelligent Will: Motivation and Instrumental Rationality in Advanced Artificial Agents". Minds and Machines. 22 (2): 71–85. doi:10.1007/s11023-012-9281-3. ISSN 1572-8641. S2CID 254835485. "as long as they possess a sufficient level of intelligence, agents having any of a wide range of final goals will pursue similar intermediary goals because they have instrumental reasons to do so."
Ngo, Richard; Chan, Lawrence; Mindermann, Sören (22 February 2023). "The alignment problem from a deep learning perspective". arXiv:2209.00626 [cs.AI].
"Introducing Superalignment". openai.com. Retrieved 16 July 2023. https://openai.com/blog/introducing-superalignment
Tilli, Cecilia (28 April 2016). "Killer Robots? Lost Jobs?". Slate. Archived from the original on 11 May 2016. Retrieved 15 May 2016. http://www.slate.com/articles/technology/future_tense/2016/04/the_threats_that_artificial_intelligence_researchers_actually_worry_about.html
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20160511183659/http://www.slate.com/articles/technology/future_tense/2016/04/the_threats_that_artificial_intelligence_researchers_actually_worry_about.html">Archived</a> from the original on 11 May 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">15 May</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Slate&amp;rft.atitle=Killer+Robots%3F+Lost+Jobs%3F&amp;rft.date=2016-04-28&amp;rft.aulast=Tilli&amp;rft.aufirst=Cecilia&amp;rft_id=http%3A%2F%2Fwww.slate.com%2Farticles%2Ftechnology%2Ffuture_tense%2F2016%2F04%2Fthe_threats_that_artificial_intelligence_researchers_actually_worry_about.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-89"><span class="mw-cite-backlink"><b><a href="#cite_ref-89">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.tor.com/2011/06/21/norvig-vs-chomsky-and-the-fight-for-the-future-of-ai/">"Norvig vs. Chomsky and the Fight for the Future of AI"</a>. <i>Tor.com</i>. 21 June 2011. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160513052842/http://www.tor.com/2011/06/21/norvig-vs-chomsky-and-the-fight-for-the-future-of-ai/">Archived</a> from the original on 13 May 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">15 May</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Tor.com&amp;rft.atitle=Norvig+vs.+Chomsky+and+the+Fight+for+the+Future+of+AI&amp;rft.date=2011-06-21&amp;rft_id=http%3A%2F%2Fwww.tor.com%2F2011%2F06%2F21%2Fnorvig-vs-chomsky-and-the-fight-for-the-future-of-ai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-skeptic-90"><span class="mw-cite-backlink"><b><a href="#cite_ref-skeptic_90-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGraves2017" class="citation news cs1">Graves, Matthew (8 November 2017). <a rel="nofollow" class="external text" href="https://www.skeptic.com/reading_room/why-we-should-be-concerned-about-artificial-superintelligence/">"Why We Should Be Concerned About Artificial Superintelligence"</a>. <i><a href="/wiki/Skeptic_(US_magazine)" class="mw-redirect" title="Skeptic (US magazine)">Skeptic (US magazine)</a></i>. Vol.&#160;22, no.&#160;2. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171113050152/https://www.skeptic.com/reading_room/why-we-should-be-concerned-about-artificial-superintelligence/">Archived</a> from the original on 13 November 2017<span class="reference-accessdate">. 
Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Skeptic+%28US+magazine%29&amp;rft.atitle=Why+We+Should+Be+Concerned+About+Artificial+Superintelligence&amp;rft.volume=22&amp;rft.issue=2&amp;rft.date=2017-11-08&amp;rft.aulast=Graves&amp;rft.aufirst=Matthew&amp;rft_id=https%3A%2F%2Fwww.skeptic.com%2Freading_room%2Fwhy-we-should-be-concerned-about-artificial-superintelligence%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-91"><span class="mw-cite-backlink"><b><a href="#cite_ref-91">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJohnson2015" class="citation news cs1">Johnson, Phil (30 July 2015). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190215143809/https://www.itworld.com/article/2823083/enterprise-software/88716-8-famous-software-bugs-in-space.html">"Houston, we have a bug: 9 famous software glitches in space"</a>. <i><a href="/wiki/IT_World" class="mw-redirect" title="IT World">IT World</a></i>. Archived from <a rel="nofollow" class="external text" href="https://www.itworld.com/article/2823083/enterprise-software/88716-8-famous-software-bugs-in-space.html">the original</a> on 15 February 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">5 February</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IT+World&amp;rft.atitle=Houston%2C+we+have+a+bug%3A+9+famous+software+glitches+in+space&amp;rft.date=2015-07-30&amp;rft.aulast=Johnson&amp;rft.aufirst=Phil&amp;rft_id=https%3A%2F%2Fwww.itworld.com%2Farticle%2F2823083%2Fenterprise-software%2F88716-8-famous-software-bugs-in-space.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-92"><span class="mw-cite-backlink"><b><a href="#cite_ref-92">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYampolskiy2014" class="citation journal cs1">Yampolskiy, Roman V. (8 April 2014). "Utility function security in artificially intelligent agents". <i>Journal of Experimental &amp; Theoretical Artificial Intelligence</i>. <b>26</b> (3): 373–389. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F0952813X.2014.895114">10.1080/0952813X.2014.895114</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:16477341">16477341</a>. 
<q>Nothing precludes sufficiently smart self-improving systems from optimising their reward mechanisms in order to optimisetheir current-goal achievement and in the process making a mistake leading to corruption of their reward functions.</q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Experimental+%26+Theoretical+Artificial+Intelligence&amp;rft.atitle=Utility+function+security+in+artificially+intelligent+agents&amp;rft.volume=26&amp;rft.issue=3&amp;rft.pages=373-389&amp;rft.date=2014-04-08&amp;rft_id=info%3Adoi%2F10.1080%2F0952813X.2014.895114&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A16477341%23id-name%3DS2CID&amp;rft.aulast=Yampolskiy&amp;rft.aufirst=Roman+V.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-93"><span class="mw-cite-backlink"><b><a href="#cite_ref-93">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.vox.com/2014/8/22/6043635/5-reasons-we-shouldnt-worry-about-super-intelligent-computers-taking">"Will artificial intelligence destroy humanity? Here are 5 reasons not to worry"</a>. <i>Vox</i>. 22 August 2014. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151030092203/http://www.vox.com/2014/8/22/6043635/5-reasons-we-shouldnt-worry-about-super-intelligent-computers-taking">Archived</a> from the original on 30 October 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">30 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Vox&amp;rft.atitle=Will+artificial+intelligence+destroy+humanity%3F+Here+are+5+reasons+not+to+worry.&amp;rft.date=2014-08-22&amp;rft_id=https%3A%2F%2Fwww.vox.com%2F2014%2F8%2F22%2F6043635%2F5-reasons-we-shouldnt-worry-about-super-intelligent-computers-taking&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-94"><span class="mw-cite-backlink"><b><a href="#cite_ref-94">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBostrom2014" class="citation book cs1">Bostrom, Nick (2014). <i>Superintelligence: Paths, Dangers, Strategies</i>. Oxford, United Kingdom: Oxford University Press. p.&#160;116. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-19-967811-2" title="Special:BookSources/978-0-19-967811-2"><bdi>978-0-19-967811-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Superintelligence%3A+Paths%2C+Dangers%2C+Strategies&amp;rft.place=Oxford%2C+United+Kingdom&amp;rft.pages=116&amp;rft.pub=Oxford+University+Press&amp;rft.date=2014&amp;rft.isbn=978-0-19-967811-2&amp;rft.aulast=Bostrom&amp;rft.aufirst=Nick&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-95"><span class="mw-cite-backlink"><b><a href="#cite_ref-95">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBostrom2012" class="citation web cs1">Bostrom, Nick (2012). <a rel="nofollow" class="external text" href="http://www.nickbostrom.com/superintelligentwill.pdf">"Superintelligent Will"</a> <span class="cs1-format">(PDF)</span>. <i>Nick Bostrom</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151128034545/http://www.nickbostrom.com/superintelligentwill.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 28 November 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">29 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Nick+Bostrom&amp;rft.atitle=Superintelligent+Will&amp;rft.date=2012&amp;rft.aulast=Bostrom&amp;rft.aufirst=Nick&amp;rft_id=http%3A%2F%2Fwww.nickbostrom.com%2Fsuperintelligentwill.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-armstrong-96"><span class="mw-cite-backlink"><b><a href="#cite_ref-armstrong_96-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFArmstrong2013" class="citation journal cs1">Armstrong, Stuart (1 January 2013). <a rel="nofollow" class="external text" href="https://www.questia.com/library/journal/1P3-3195465391/general-purpose-intelligence-arguing-the-orthogonality">"General Purpose Intelligence: Arguing the Orthogonality Thesis"</a>. <i>Analysis and Metaphysics</i>. <b>12</b>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20141011084205/http://www.questia.com/library/journal/1P3-3195465391/general-purpose-intelligence-arguing-the-orthogonality">Archived</a> from the original on 11 October 2014<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2 April</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Analysis+and+Metaphysics&amp;rft.atitle=General+Purpose+Intelligence%3A+Arguing+the+Orthogonality+Thesis&amp;rft.volume=12&amp;rft.date=2013-01-01&amp;rft.aulast=Armstrong&amp;rft.aufirst=Stuart&amp;rft_id=https%3A%2F%2Fwww.questia.com%2Flibrary%2Fjournal%2F1P3-3195465391%2Fgeneral-purpose-intelligence-arguing-the-orthogonality&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span> Full text available <a rel="nofollow" class="external text" href="https://www.fhi.ox.ac.uk/wp-content/uploads/Orthogonality_Analysis_and_Metaethics-1.pdf">here</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200325025010/https://www.fhi.ox.ac.uk/wp-content/uploads/Orthogonality_Analysis_and_Metaethics-1.pdf">Archived</a> 25 March 2020 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>.</span> </li> <li id="cite_note-chorost-97"><span class="mw-cite-backlink">^ <a href="#cite_ref-chorost_97-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-chorost_97-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChorost2016" class="citation magazine cs1">Chorost, Michael (18 April 2016). <a rel="nofollow" class="external text" href="http://www.slate.com/articles/technology/future_tense/2016/04/the_philosophical_argument_against_artificial_intelligence_killing_us_all.html">"Let Artificial Intelligence Evolve"</a>. <i>Slate</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171127213642/http://www.slate.com/articles/technology/future_tense/2016/04/the_philosophical_argument_against_artificial_intelligence_killing_us_all.html">Archived</a> from the original on 27 November 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Slate&amp;rft.atitle=Let+Artificial+Intelligence+Evolve&amp;rft.date=2016-04-18&amp;rft.aulast=Chorost&amp;rft.aufirst=Michael&amp;rft_id=http%3A%2F%2Fwww.slate.com%2Farticles%2Ftechnology%2Ffuture_tense%2F2016%2F04%2Fthe_philosophical_argument_against_artificial_intelligence_killing_us_all.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Telegraph2016-98"><span class="mw-cite-backlink"><b><a href="#cite_ref-Telegraph2016_98-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><span class="id-lock-subscription" title="Paid subscription required"><a rel="nofollow" class="external text" href="https://www.telegraph.co.uk/technology/news/11837157/Should-humans-fear-the-rise-of-the-machine.html">"Should humans fear the rise of the machine?"</a></span>. <i><a href="/wiki/The_Telegraph_(UK)" class="mw-redirect" title="The Telegraph (UK)">The Telegraph (UK)</a></i>. 1 September 2015. 
<a rel="nofollow" class="external text" href="https://ghostarchive.org/archive/20220112/https://www.telegraph.co.uk/technology/news/11837157/Should-humans-fear-the-rise-of-the-machine.html">Archived</a> from the original on 12 January 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">7 February</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Telegraph+%28UK%29&amp;rft.atitle=Should+humans+fear+the+rise+of+the+machine%3F&amp;rft.date=2015-09-01&amp;rft_id=https%3A%2F%2Fwww.telegraph.co.uk%2Ftechnology%2Fnews%2F11837157%2FShould-humans-fear-the-rise-of-the-machine.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-shermer-99"><span class="mw-cite-backlink">^ <a href="#cite_ref-shermer_99-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-shermer_99-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFShermer2017" class="citation journal cs1">Shermer, Michael (1 March 2017). <a rel="nofollow" class="external text" href="https://www.scientificamerican.com/article/artificial-intelligence-is-not-a-threat-mdash-yet/">"Apocalypse AI"</a>. <i>Scientific American</i>. <b>316</b> (3): 77. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017SciAm.316c..77S">2017SciAm.316c..77S</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fscientificamerican0317-77">10.1038/scientificamerican0317-77</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/28207698">28207698</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171201051401/https://www.scientificamerican.com/article/artificial-intelligence-is-not-a-threat-mdash-yet/">Archived</a> from the original on 1 December 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Scientific+American&amp;rft.atitle=Apocalypse+AI&amp;rft.volume=316&amp;rft.issue=3&amp;rft.pages=77&amp;rft.date=2017-03-01&amp;rft_id=info%3Apmid%2F28207698&amp;rft_id=info%3Adoi%2F10.1038%2Fscientificamerican0317-77&amp;rft_id=info%3Abibcode%2F2017SciAm.316c..77S&amp;rft.aulast=Shermer&amp;rft.aufirst=Michael&amp;rft_id=https%3A%2F%2Fwww.scientificamerican.com%2Farticle%2Fartificial-intelligence-is-not-a-threat-mdash-yet%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-100"><span class="mw-cite-backlink"><b><a href="#cite_ref-100">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.bbc.com/news/technology-34118481">"Intelligent Machines: What does Facebook want with AI?"</a>. <i>BBC News</i>. 
Baum, Seth (30 September 2018). "Countering Superintelligence Misinformation". Information. 9 (10): 244. doi:10.3390/info9100244. ISSN 2078-2489.
"The Myth Of AI". www.edge.org. Archived from the original on 11 March 2020. Retrieved 11 March 2020. https://www.edge.org/conversation/jaron_lanier-the-myth-of-ai
Bostrom, Nick. Superintelligence: Paths, Dangers, Strategies (audiobook). ISBN 978-1-5012-2774-5. OCLC 1061147095.
Sotala, Kaj; Yampolskiy, Roman V. (19 December 2014). "Responses to catastrophic AGI risk: a survey". Physica Scripta. 90 (1): 12. Bibcode:2015PhyS...90a8001S. doi:10.1088/0031-8949/90/1/018001. ISSN 0031-8949.
Pistono, Federico; Yampolskiy, Roman V. (9 May 2016). Unethical Research: How to Create a Malevolent Artificial Intelligence. OCLC 1106238048.
Haney, Brian Seamus (2018). "The Perils & Promises of Artificial General Intelligence". SSRN Working Paper Series. doi:10.2139/ssrn.3261254. ISSN 1556-5068. S2CID 86743553.
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:86743553">86743553</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=SSRN+Working+Paper+Series&amp;rft.atitle=The+Perils+%26+Promises+of+Artificial+General+Intelligence&amp;rft.date=2018&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A86743553%23id-name%3DS2CID&amp;rft.issn=1556-5068&amp;rft_id=info%3Adoi%2F10.2139%2Fssrn.3261254&amp;rft.aulast=Haney&amp;rft.aufirst=Brian+Seamus&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-108"><span class="mw-cite-backlink"><b><a href="#cite_ref-108">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://spectrum.ieee.org/will-superintelligent-ais-be-our-doom">"Will Superintelligent AIs Be Our Doom?"</a>. <i>IEEE Spectrum</i>. 3 September 2014<span class="reference-accessdate">. Retrieved <span class="nowrap">13 September</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=IEEE+Spectrum&amp;rft.atitle=Will+Superintelligent+AIs+Be+Our+Doom%3F&amp;rft.date=2014-09-03&amp;rft_id=https%3A%2F%2Fspectrum.ieee.org%2Fwill-superintelligent-ais-be-our-doom&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-109"><span class="mw-cite-backlink"><b><a href="#cite_ref-109">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussell2017" class="citation journal cs1">Russell, Stuart (30 August 2017). <a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F548520a">"Artificial intelligence: The future is superintelligent"</a>. <i>Nature</i>. <b>548</b> (7669): 520–521. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017Natur.548..520R">2017Natur.548..520R</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F548520a">10.1038/548520a</a></span>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:4459076">4459076</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=Artificial+intelligence%3A+The+future+is+superintelligent&amp;rft.volume=548&amp;rft.issue=7669&amp;rft.pages=520-521&amp;rft.date=2017-08-30&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A4459076%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1038%2F548520a&amp;rft_id=info%3Abibcode%2F2017Natur.548..520R&amp;rft.aulast=Russell&amp;rft.aufirst=Stuart&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252F548520a&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-life_3.0-110"><span class="mw-cite-backlink">^ <a href="#cite_ref-life_3.0_110-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-life_3.0_110-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-life_3.0_110-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTegmark2017" class="citation book cs1"><a href="/wiki/Max_Tegmark" title="Max Tegmark">Tegmark, Max</a> (2017). <a href="/wiki/Life_3.0:_Being_Human_in_the_Age_of_Artificial_Intelligence" class="mw-redirect" title="Life 3.0: Being Human in the Age of Artificial Intelligence"><i>Life 3.0: Being Human in the Age of Artificial Intelligence</i></a> (1st&#160;ed.). Mainstreaming AI Safety: Knopf. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-451-48507-6" title="Special:BookSources/978-0-451-48507-6"><bdi>978-0-451-48507-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Life+3.0%3A+Being+Human+in+the+Age+of+Artificial+Intelligence&amp;rft.place=Mainstreaming+AI+Safety&amp;rft.edition=1st&amp;rft.pub=Knopf&amp;rft.date=2017&amp;rft.isbn=978-0-451-48507-6&amp;rft.aulast=Tegmark&amp;rft.aufirst=Max&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-111"><span class="mw-cite-backlink"><b><a href="#cite_ref-111">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKumar" class="citation web cs1">Kumar, Vibhore. <a rel="nofollow" class="external text" href="https://www.forbes.com/sites/forbestechcouncil/2023/04/24/at-the-dawn-of-artificial-general-intelligence-balancing-abundance-with-existential-safeguards/">"Council Post: At The Dawn Of Artificial General Intelligence: Balancing Abundance With Existential Safeguards"</a>. <i>Forbes</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">23 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Forbes&amp;rft.atitle=Council+Post%3A+At+The+Dawn+Of+Artificial+General+Intelligence%3A+Balancing+Abundance+With+Existential+Safeguards&amp;rft.aulast=Kumar&amp;rft.aufirst=Vibhore&amp;rft_id=https%3A%2F%2Fwww.forbes.com%2Fsites%2Fforbestechcouncil%2F2023%2F04%2F24%2Fat-the-dawn-of-artificial-general-intelligence-balancing-abundance-with-existential-safeguards%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:9-112"><span class="mw-cite-backlink">^ <a href="#cite_ref-:9_112-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:9_112-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://futureoflife.org/open-letter/pause-giant-ai-experiments/">"Pause Giant AI Experiments: An Open Letter"</a>. <i>Future of Life Institute</i><span class="reference-accessdate">. Retrieved <span class="nowrap">30 March</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Future+of+Life+Institute&amp;rft.atitle=Pause+Giant+AI+Experiments%3A+An+Open+Letter&amp;rft_id=https%3A%2F%2Ffutureoflife.org%2Fopen-letter%2Fpause-giant-ai-experiments%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-113"><span class="mw-cite-backlink"><b><a href="#cite_ref-113">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://futureoflife.org/ai-principles/">"AI Principles"</a>. <i><a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a></i>. 11 August 2017. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171211171044/https://futureoflife.org/ai-principles/">Archived</a> from the original on 11 December 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">11 December</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Future+of+Life+Institute&amp;rft.atitle=AI+Principles&amp;rft.date=2017-08-11&amp;rft_id=https%3A%2F%2Ffutureoflife.org%2Fai-principles%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-114"><span class="mw-cite-backlink"><b><a href="#cite_ref-114">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="http://www.newsweek.com/ai-asilomar-principles-artificial-intelligence-elon-musk-550525">"Elon Musk and Stephen Hawking warn of artificial intelligence arms race"</a>. <i><a href="/wiki/Newsweek" title="Newsweek">Newsweek</a></i>. 31 January 2017. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20171211034528/http://www.newsweek.com/ai-asilomar-principles-artificial-intelligence-elon-musk-550525">Archived</a> from the original on 11 December 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">11 December</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Newsweek&amp;rft.atitle=Elon+Musk+and+Stephen+Hawking+warn+of+artificial+intelligence+arms+race&amp;rft.date=2017-01-31&amp;rft_id=http%3A%2F%2Fwww.newsweek.com%2Fai-asilomar-principles-artificial-intelligence-elon-musk-550525&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-115"><span class="mw-cite-backlink"><b><a href="#cite_ref-115">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFord2015" class="citation book cs1"><a href="/wiki/Martin_Ford_(author)" title="Martin Ford (author)">Ford, Martin</a> (2015). "Chapter 9: Super-intelligence and the Singularity". <a href="/wiki/Rise_of_the_Robots:_Technology_and_the_Threat_of_a_Jobless_Future" class="mw-redirect" title="Rise of the Robots: Technology and the Threat of a Jobless Future"><i>Rise of the Robots: Technology and the Threat of a Jobless Future</i></a>. Basic Books. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-465-05999-7" title="Special:BookSources/978-0-465-05999-7"><bdi>978-0-465-05999-7</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Chapter+9%3A+Super-intelligence+and+the+Singularity&amp;rft.btitle=Rise+of+the+Robots%3A+Technology+and+the+Threat+of+a+Jobless+Future&amp;rft.pub=Basic+Books&amp;rft.date=2015&amp;rft.isbn=978-0-465-05999-7&amp;rft.aulast=Ford&amp;rft.aufirst=Martin&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-116"><span class="mw-cite-backlink"><b><a href="#cite_ref-116">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBostrom2016" class="citation book cs1"><a href="/wiki/Nick_Bostrom" title="Nick Bostrom">Bostrom, Nick</a> (2016). "New Epilogue to the Paperback Edition". 
<a href="/wiki/Superintelligence:_Paths,_Dangers,_Strategies" title="Superintelligence: Paths, Dangers, Strategies"><i>Superintelligence: Paths, Dangers, Strategies</i></a> (Paperback&#160;ed.).</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=New+Epilogue+to+the+Paperback+Edition&amp;rft.btitle=Superintelligence%3A+Paths%2C+Dangers%2C+Strategies&amp;rft.edition=Paperback&amp;rft.date=2016&amp;rft.aulast=Bostrom&amp;rft.aufirst=Nick&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:10-117"><span class="mw-cite-backlink"><b><a href="#cite_ref-:10_117-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation magazine cs1"><a rel="nofollow" class="external text" href="https://time.com/6258483/uncontrollable-ai-agi-risks/">"Why Uncontrollable AI Looks More Likely Than Ever"</a>. <i>Time</i>. 27 February 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">30 March</span> 2023</span>. <q>It is therefore no surprise that according to the most recent AI Impacts Survey, nearly half of 731 leading AI researchers think there is at least a 10% chance that human-level AI would lead to an "extremely negative outcome," or existential risk.</q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Time&amp;rft.atitle=Why+Uncontrollable+AI+Looks+More+Likely+Than+Ever&amp;rft.date=2023-02-27&amp;rft_id=https%3A%2F%2Ftime.com%2F6258483%2Funcontrollable-ai-agi-risks%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:132-118"><span class="mw-cite-backlink">^ <a href="#cite_ref-:132_118-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:132_118-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMaas2019" class="citation journal cs1">Maas, Matthijs M. (6 February 2019). "How viable is international arms control for military artificial intelligence? Three lessons from nuclear weapons of mass destruction". <i>Contemporary Security Policy</i>. <b>40</b> (3): 285–311. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F13523260.2019.1576464">10.1080/13523260.2019.1576464</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1352-3260">1352-3260</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:159310223">159310223</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Contemporary+Security+Policy&amp;rft.atitle=How+viable+is+international+arms+control+for+military+artificial+intelligence%3F+Three+lessons+from+nuclear+weapons+of+mass+destruction&amp;rft.volume=40&amp;rft.issue=3&amp;rft.pages=285-311&amp;rft.date=2019-02-06&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A159310223%23id-name%3DS2CID&amp;rft.issn=1352-3260&amp;rft_id=info%3Adoi%2F10.1080%2F13523260.2019.1576464&amp;rft.aulast=Maas&amp;rft.aufirst=Matthijs+M.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:6-119"><span class="mw-cite-backlink">^ <a href="#cite_ref-:6_119-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:6_119-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.abc.net.au/news/2023-03-24/what-is-agi-artificial-general-intelligence-ai-experts-risks/102035132">"Impressed by artificial intelligence? Experts say AGI is coming next, and it has 'existential' risks"</a>. <i>ABC News</i>. 23 March 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">30 March</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=ABC+News&amp;rft.atitle=Impressed+by+artificial+intelligence%3F+Experts+say+AGI+is+coming+next%2C+and+it+has+%27existential%27+risks&amp;rft.date=2023-03-23&amp;rft_id=https%3A%2F%2Fwww.abc.net.au%2Fnews%2F2023-03-24%2Fwhat-is-agi-artificial-general-intelligence-ai-experts-risks%2F102035132&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-BBC_News-120"><span class="mw-cite-backlink"><b><a href="#cite_ref-BBC_News_120-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRawlinson2015" class="citation news cs1">Rawlinson, Kevin (29 January 2015). <a rel="nofollow" class="external text" href="https://www.bbc.co.uk/news/31047780">"Microsoft's Bill Gates insists AI is a threat"</a>. <i><a href="/wiki/BBC_News" title="BBC News">BBC News</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150129183607/http://www.bbc.co.uk/news/31047780">Archived</a> from the original on 29 January 2015<span class="reference-accessdate">. 
Retrieved <span class="nowrap">30 January</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BBC+News&amp;rft.atitle=Microsoft%27s+Bill+Gates+insists+AI+is+a+threat&amp;rft.date=2015-01-29&amp;rft.aulast=Rawlinson&amp;rft.aufirst=Kevin&amp;rft_id=https%3A%2F%2Fwww.bbc.co.uk%2Fnews%2F31047780&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-121"><span class="mw-cite-backlink"><b><a href="#cite_ref-121">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWashington_Post2015" class="citation web cs1">Washington Post (14 December 2015). <a rel="nofollow" class="external text" href="https://www.chicagotribune.com/bluesky/technology/ct-tech-titans-against-terminators-20151214-story.html">"Tech titans like Elon Musk are spending $1 billion to save you from terminators"</a>. <i><a href="/wiki/Chicago_Tribune" title="Chicago Tribune">Chicago Tribune</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160607121118/http://www.chicagotribune.com/bluesky/technology/ct-tech-titans-against-terminators-20151214-story.html">Archived</a> from the original on 7 June 2016.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Chicago+Tribune&amp;rft.atitle=Tech+titans+like+Elon+Musk+are+spending+%241+billion+to+save+you+from+terminators&amp;rft.date=2015-12-14&amp;rft.au=Washington+Post&amp;rft_id=https%3A%2F%2Fwww.chicagotribune.com%2Fbluesky%2Ftechnology%2Fct-tech-titans-against-terminators-20151214-story.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-122"><span class="mw-cite-backlink"><b><a href="#cite_ref-122">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://www.washingtonpost.com/technology/2023/04/09/ai-safety-openai/">"Doomsday to utopia: Meet AI's rival factions"</a></span>. <i>Washington Post</i>. 9 April 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">30 April</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Washington+Post&amp;rft.atitle=Doomsday+to+utopia%3A+Meet+AI%27s+rival+factions&amp;rft.date=2023-04-09&amp;rft_id=https%3A%2F%2Fwww.washingtonpost.com%2Ftechnology%2F2023%2F04%2F09%2Fai-safety-openai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-123"><span class="mw-cite-backlink"><b><a href="#cite_ref-123">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.openphilanthropy.org/grants/uc-berkeley-center-for-human-compatible-ai-2016/">"UC Berkeley – Center for Human-Compatible AI (2016)"</a>. <i>Open Philanthropy</i>. 27 June 2016<span class="reference-accessdate">. 
Retrieved <span class="nowrap">30 April</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Open+Philanthropy&amp;rft.atitle=UC+Berkeley+%E2%80%93+Center+for+Human-Compatible+AI+%282016%29&amp;rft.date=2016-06-27&amp;rft_id=https%3A%2F%2Fwww.openphilanthropy.org%2Fgrants%2Fuc-berkeley-center-for-human-compatible-ai-2016%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-124"><span class="mw-cite-backlink"><b><a href="#cite_ref-124">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.techinsider.io/mysterious-artificial-intelligence-company-elon-musk-investment-2015-10">"The mysterious artificial intelligence company Elon Musk invested in is developing game-changing smart computers"</a>. <i>Tech Insider</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151030165333/http://www.techinsider.io/mysterious-artificial-intelligence-company-elon-musk-investment-2015-10">Archived</a> from the original on 30 October 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">30 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Tech+Insider&amp;rft.atitle=The+mysterious+artificial+intelligence+company+Elon+Musk+invested+in+is+developing+game-changing+smart+computers&amp;rft_id=http%3A%2F%2Fwww.techinsider.io%2Fmysterious-artificial-intelligence-company-elon-musk-investment-2015-10&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-FOOTNOTEClark2015a-125"><span class="mw-cite-backlink"><b><a href="#cite_ref-FOOTNOTEClark2015a_125-0">^</a></b></span> <span class="reference-text"><a href="#CITEREFClark2015a">Clark 2015a</a>.</span> </li> <li id="cite_note-126"><span class="mw-cite-backlink"><b><a href="#cite_ref-126">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.fastcompany.com/3041007/fast-feed/elon-musk-is-donating-10m-of-his-own-money-to-artificial-intelligence-research">"Elon Musk Is Donating $10M Of His Own Money To Artificial Intelligence Research"</a>. <i>Fast Company</i>. 15 January 2015. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151030202356/http://www.fastcompany.com/3041007/fast-feed/elon-musk-is-donating-10m-of-his-own-money-to-artificial-intelligence-research">Archived</a> from the original on 30 October 2015<span class="reference-accessdate">. 
Retrieved <span class="nowrap">30 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Fast+Company&amp;rft.atitle=Elon+Musk+Is+Donating+%2410M+Of+His+Own+Money+To+Artificial+Intelligence+Research&amp;rft.date=2015-01-15&amp;rft_id=http%3A%2F%2Fwww.fastcompany.com%2F3041007%2Ffast-feed%2Felon-musk-is-donating-10m-of-his-own-money-to-artificial-intelligence-research&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-slate_killer2-127"><span class="mw-cite-backlink"><b><a href="#cite_ref-slate_killer2_127-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTilli2016" class="citation news cs1">Tilli, Cecilia (28 April 2016). <a rel="nofollow" class="external text" href="http://www.slate.com/articles/technology/future_tense/2016/04/the_threats_that_artificial_intelligence_researchers_actually_worry_about.html">"Killer Robots? Lost Jobs?"</a>. <i>Slate</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160511183659/http://www.slate.com/articles/technology/future_tense/2016/04/the_threats_that_artificial_intelligence_researchers_actually_worry_about.html">Archived</a> from the original on 11 May 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">15 May</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Slate&amp;rft.atitle=Killer+Robots%3F+Lost+Jobs%3F&amp;rft.date=2016-04-28&amp;rft.aulast=Tilli&amp;rft.aufirst=Cecilia&amp;rft_id=http%3A%2F%2Fwww.slate.com%2Farticles%2Ftechnology%2Ffuture_tense%2F2016%2F04%2Fthe_threats_that_artificial_intelligence_researchers_actually_worry_about.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-new_yorker_doomsday2-128"><span class="mw-cite-backlink"><b><a href="#cite_ref-new_yorker_doomsday2_128-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKhatchadourian2015" class="citation news cs1">Khatchadourian, Raffi (23 November 2015). <a rel="nofollow" class="external text" href="https://www.newyorker.com/magazine/2015/11/23/doomsday-invention-artificial-intelligence-nick-bostrom">"The Doomsday Invention: Will artificial intelligence bring us utopia or destruction?"</a>. <i><a href="/wiki/The_New_Yorker_(magazine)" class="mw-redirect" title="The New Yorker (magazine)">The New Yorker</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190429183807/https://www.newyorker.com/magazine/2015/11/23/doomsday-invention-artificial-intelligence-nick-bostrom">Archived</a> from the original on 29 April 2019<span class="reference-accessdate">. 
Retrieved <span class="nowrap">7 February</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+New+Yorker&amp;rft.atitle=The+Doomsday+Invention%3A+Will+artificial+intelligence+bring+us+utopia+or+destruction%3F&amp;rft.date=2015-11-23&amp;rft.aulast=Khatchadourian&amp;rft.aufirst=Raffi&amp;rft_id=https%3A%2F%2Fwww.newyorker.com%2Fmagazine%2F2015%2F11%2F23%2Fdoomsday-invention-artificial-intelligence-nick-bostrom&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-129"><span class="mw-cite-backlink"><b><a href="#cite_ref-129">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://arstechnica.com/information-technology/2023/05/warning-of-ais-danger-pioneer-geoffrey-hinton-quits-google-to-speak-freely/">"Warning of AI's danger, pioneer Geoffrey Hinton quits Google to speak freely"</a>. <i>www.arstechnica.com</i>. 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">23 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=www.arstechnica.com&amp;rft.atitle=Warning+of+AI%27s+danger%2C+pioneer+Geoffrey+Hinton+quits+Google+to+speak+freely&amp;rft.date=2023&amp;rft_id=https%3A%2F%2Farstechnica.com%2Finformation-technology%2F2023%2F05%2Fwarning-of-ais-danger-pioneer-geoffrey-hinton-quits-google-to-speak-freely%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-130"><span class="mw-cite-backlink"><b><a href="#cite_ref-130">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGarling2015" class="citation magazine cs1">Garling, Caleb (5 May 2015). <a rel="nofollow" class="external text" href="https://www.wired.com/brandlab/2015/05/andrew-ng-deep-learning-mandate-humans-not-just-machines/">"Andrew Ng: Why 'Deep Learning' Is a Mandate for Humans, Not Just Machines"</a>. <i>Wired</i><span class="reference-accessdate">. Retrieved <span class="nowrap">31 March</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Wired&amp;rft.atitle=Andrew+Ng%3A+Why+%27Deep+Learning%27+Is+a+Mandate+for+Humans%2C+Not+Just+Machines&amp;rft.date=2015-05-05&amp;rft.aulast=Garling&amp;rft.aufirst=Caleb&amp;rft_id=https%3A%2F%2Fwww.wired.com%2Fbrandlab%2F2015%2F05%2Fandrew-ng-deep-learning-mandate-humans-not-just-machines%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-131"><span class="mw-cite-backlink"><b><a href="#cite_ref-131">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://mambapost.com/2023/04/tech-news/ai-are-an-existential-threat-to-humanity/">"Is artificial intelligence really an existential threat to humanity?"</a>. <i>MambaPost</i>. 
4 April 2023.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=MambaPost&amp;rft.atitle=Is+artificial+intelligence+really+an+existential+threat+to+humanity%3F&amp;rft.date=2023-04-04&amp;rft_id=https%3A%2F%2Fmambapost.com%2F2023%2F04%2Ftech-news%2Fai-are-an-existential-threat-to-humanity%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-132"><span class="mw-cite-backlink"><b><a href="#cite_ref-132">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://fusion.net/story/54583/the-case-against-killer-robots-from-a-guy-actually-building-ai/">"The case against killer robots, from a guy actually working on artificial intelligence"</a>. <i>Fusion.net</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160204175716/http://fusion.net/story/54583/the-case-against-killer-robots-from-a-guy-actually-building-ai/">Archived</a> from the original on 4 February 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">31 January</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Fusion.net&amp;rft.atitle=The+case+against+killer+robots%2C+from+a+guy+actually+working+on+artificial+intelligence&amp;rft_id=http%3A%2F%2Ffusion.net%2Fstory%2F54583%2Fthe-case-against-killer-robots-from-a-guy-actually-building-ai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-133"><span class="mw-cite-backlink"><b><a href="#cite_ref-133">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://venturebeat.com/ai/ai-experts-challenge-doomer-narrative-including-extinction-risk-claims/">"AI experts challenge 'doomer' narrative, including 'extinction risk' claims"</a>. <i>VentureBeat</i>. 31 May 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">8 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=VentureBeat&amp;rft.atitle=AI+experts+challenge+%27doomer%27+narrative%2C+including+%27extinction+risk%27+claims&amp;rft.date=2023-05-31&amp;rft_id=https%3A%2F%2Fventurebeat.com%2Fai%2Fai-experts-challenge-doomer-narrative-including-extinction-risk-claims%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-134"><span class="mw-cite-backlink"><b><a href="#cite_ref-134">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFColdewey2023" class="citation web cs1">Coldewey, Devin (1 April 2023). <a rel="nofollow" class="external text" href="https://techcrunch.com/2023/03/31/ethicists-fire-back-at-ai-pause-letter-they-say-ignores-the-actual-harms/">"Ethicists fire back at 'AI Pause' letter they say 'ignores the actual harms'<span class="cs1-kern-right"></span>"</a>. <i>TechCrunch</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">23 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=TechCrunch&amp;rft.atitle=Ethicists+fire+back+at+%27AI+Pause%27+letter+they+say+%27ignores+the+actual+harms%27&amp;rft.date=2023-04-01&amp;rft.aulast=Coldewey&amp;rft.aufirst=Devin&amp;rft_id=https%3A%2F%2Ftechcrunch.com%2F2023%2F03%2F31%2Fethicists-fire-back-at-ai-pause-letter-they-say-ignores-the-actual-harms%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-135"><span class="mw-cite-backlink"><b><a href="#cite_ref-135">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://dair-institute.org/">"DAIR (Distributed AI Research Institute)"</a>. <i><a href="/wiki/DAIR_Institute" class="mw-redirect" title="DAIR Institute">DAIR Institute</a></i><span class="reference-accessdate">. Retrieved <span class="nowrap">23 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=DAIR+Institute&amp;rft.atitle=DAIR+%28Distributed+AI+Research+Institute%29&amp;rft_id=https%3A%2F%2Fdair-institute.org%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-136"><span class="mw-cite-backlink"><b><a href="#cite_ref-136">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGebruTorres2024" class="citation journal cs1">Gebru, Timnit; Torres, Émile P. (14 April 2024). <a rel="nofollow" class="external text" href="https://firstmonday.org/ojs/index.php/fm/article/view/13636">"The TESCREAL bundle: Eugenics and the promise of utopia through artificial general intelligence"</a>. <i>First Monday</i>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.5210%2Ffm.v29i4.13636">10.5210/fm.v29i4.13636</a></span>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1396-0466">1396-0466</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=First+Monday&amp;rft.atitle=The+TESCREAL+bundle%3A+Eugenics+and+the+promise+of+utopia+through+artificial+general+intelligence&amp;rft.date=2024-04-14&amp;rft_id=info%3Adoi%2F10.5210%2Ffm.v29i4.13636&amp;rft.issn=1396-0466&amp;rft.aulast=Gebru&amp;rft.aufirst=Timnit&amp;rft.au=Torres%2C+%C3%89mile+P.&amp;rft_id=https%3A%2F%2Ffirstmonday.org%2Fojs%2Findex.php%2Ffm%2Farticle%2Fview%2F13636&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-137"><span class="mw-cite-backlink"><b><a href="#cite_ref-137">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKelly2017" class="citation magazine cs1"><a href="/wiki/Kevin_Kelly_(editor)" title="Kevin Kelly (editor)">Kelly, Kevin</a> (25 April 2017). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211226181932/https://www.wired.com/2017/04/the-myth-of-a-superhuman-ai/">"The Myth of a Superhuman AI"</a>. <i>Wired</i>. Archived from <a rel="nofollow" class="external text" href="https://www.wired.com/2017/04/the-myth-of-a-superhuman-ai/">the original</a> on 26 December 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">19 February</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Wired&amp;rft.atitle=The+Myth+of+a+Superhuman+AI&amp;rft.date=2017-04-25&amp;rft.aulast=Kelly&amp;rft.aufirst=Kevin&amp;rft_id=https%3A%2F%2Fwww.wired.com%2F2017%2F04%2Fthe-myth-of-a-superhuman-ai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-138"><span class="mw-cite-backlink"><b><a href="#cite_ref-138">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJindal2023" class="citation web cs1">Jindal, Siddharth (7 July 2023). <a rel="nofollow" class="external text" href="https://analyticsindiamag.com/openais-farfetched-pursuit-of-ai-alignment/">"OpenAI's Pursuit of AI Alignment is Farfetched"</a>. <i>Analytics India Magazine</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">23 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Analytics+India+Magazine&amp;rft.atitle=OpenAI%27s+Pursuit+of+AI+Alignment+is+Farfetched&amp;rft.date=2023-07-07&amp;rft.aulast=Jindal&amp;rft.aufirst=Siddharth&amp;rft_id=https%3A%2F%2Fanalyticsindiamag.com%2Fopenais-farfetched-pursuit-of-ai-alignment%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-139"><span class="mw-cite-backlink"><b><a href="#cite_ref-139">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.businessinsider.com/mark-zuckerberg-shares-thoughts-elon-musks-ai-2018-5">"Mark Zuckerberg responds to Elon Musk's paranoia about AI: 'AI is going to... help keep our communities safe.'<span class="cs1-kern-right"></span>"</a>. <i>Business Insider</i>. 25 May 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190506173756/https://www.businessinsider.com/mark-zuckerberg-shares-thoughts-elon-musks-ai-2018-5">Archived</a> from the original on 6 May 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">6 May</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Business+Insider&amp;rft.atitle=Mark+Zuckerberg+responds+to+Elon+Musk%27s+paranoia+about+AI%3A+%27AI+is+going+to...+help+keep+our+communities+safe.%27&amp;rft.date=2018-05-25&amp;rft_id=https%3A%2F%2Fwww.businessinsider.com%2Fmark-zuckerberg-shares-thoughts-elon-musks-ai-2018-5&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-140"><span class="mw-cite-backlink"><b><a href="#cite_ref-140">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDadich" class="citation magazine cs1">Dadich, Scott. <a rel="nofollow" class="external text" href="https://www.wired.com/2016/10/president-obama-mit-joi-ito-interview/">"Barack Obama Talks AI, Robo Cars, and the Future of the World"</a>. <i>WIRED</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171203142607/https://www.wired.com/2016/10/president-obama-mit-joi-ito-interview/">Archived</a> from the original on 3 December 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=WIRED&amp;rft.atitle=Barack+Obama+Talks+AI%2C+Robo+Cars%2C+and+the+Future+of+the+World&amp;rft.aulast=Dadich&amp;rft.aufirst=Scott&amp;rft_id=https%3A%2F%2Fwww.wired.com%2F2016%2F10%2Fpresident-obama-mit-joi-ito-interview%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-141"><span class="mw-cite-backlink"><b><a href="#cite_ref-141">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKircher" class="citation news cs1">Kircher, Madison Malone. 
<a rel="nofollow" class="external text" href="https://nymag.com/selectall/2016/10/barack-obama-talks-artificial-intelligence-in-wired.html">"Obama on the Risks of AI: 'You Just Gotta Have Somebody Close to the Power Cord'<span class="cs1-kern-right"></span>"</a>. <i>Select All</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171201040306/http://nymag.com/selectall/2016/10/barack-obama-talks-artificial-intelligence-in-wired.html">Archived</a> from the original on 1 December 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Select+All&amp;rft.atitle=Obama+on+the+Risks+of+AI%3A+%27You+Just+Gotta+Have+Somebody+Close+to+the+Power+Cord%27&amp;rft.aulast=Kircher&amp;rft.aufirst=Madison+Malone&amp;rft_id=https%3A%2F%2Fnymag.com%2Fselectall%2F2016%2F10%2Fbarack-obama-talks-artificial-intelligence-in-wired.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-142"><span class="mw-cite-backlink"><b><a href="#cite_ref-142">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFClinton2017" class="citation book cs1">Clinton, Hillary (2017). <a href="/wiki/What_Happened_(Clinton_book)" title="What Happened (Clinton book)"><i>What Happened</i></a>. Simon and Schuster. p.&#160;241. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-1-5011-7556-5" title="Special:BookSources/978-1-5011-7556-5"><bdi>978-1-5011-7556-5</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=What+Happened&amp;rft.pages=241&amp;rft.pub=Simon+and+Schuster&amp;rft.date=2017&amp;rft.isbn=978-1-5011-7556-5&amp;rft.aulast=Clinton&amp;rft.aufirst=Hillary&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span> via <a rel="nofollow" class="external autonumber" href="http://lukemuehlhauser.com/hillary-clinton-on-ai-risk/">[1]</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171201033319/http://lukemuehlhauser.com/hillary-clinton-on-ai-risk/">Archived</a> 1 December 2017 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></span> </li> <li id="cite_note-143"><span class="mw-cite-backlink"><b><a href="#cite_ref-143">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.usatoday.com/story/tech/news/2018/01/02/artificial-intelligence-end-world-overblown-fears/985813001/">"Elon Musk says AI could doom human civilization. Zuckerberg disagrees. Who's right?"</a>. 5 January 2023. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180108075432/https://www.usatoday.com/story/tech/news/2018/01/02/artificial-intelligence-end-world-overblown-fears/985813001/">Archived</a> from the original on 8 January 2018<span class="reference-accessdate">. 
Retrieved <span class="nowrap">8 January</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.atitle=Elon+Musk+says+AI+could+doom+human+civilization.+Zuckerberg+disagrees.+Who%27s+right%3F&amp;rft.date=2023-01-05&amp;rft_id=https%3A%2F%2Fwww.usatoday.com%2Fstory%2Ftech%2Fnews%2F2018%2F01%2F02%2Fartificial-intelligence-end-world-overblown-fears%2F985813001%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-144"><span class="mw-cite-backlink"><b><a href="#cite_ref-144">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://today.yougov.com/topics/technology/articles-reports/2023/04/14/ai-nuclear-weapons-world-war-humanity-poll">"AI doomsday worries many Americans. So does apocalypse from climate change, nukes, war, and more"</a>. 14 April 2023. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230623095224/https://today.yougov.com/topics/technology/articles-reports/2023/04/14/ai-nuclear-weapons-world-war-humanity-poll">Archived</a> from the original on 23 June 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">9 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.atitle=AI+doomsday+worries+many+Americans.+So+does+apocalypse+from+climate+change%2C+nukes%2C+war%2C+and+more&amp;rft.date=2023-04-14&amp;rft_id=https%3A%2F%2Ftoday.yougov.com%2Ftopics%2Ftechnology%2Farticles-reports%2F2023%2F04%2F14%2Fai-nuclear-weapons-world-war-humanity-poll&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-145"><span class="mw-cite-backlink"><b><a href="#cite_ref-145">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTysonKikuchi2023" class="citation web cs1">Tyson, Alec; Kikuchi, Emma (28 August 2023). <a rel="nofollow" class="external text" href="https://www.pewresearch.org/short-reads/2023/08/28/growing-public-concern-about-the-role-of-artificial-intelligence-in-daily-life/">"Growing public concern about the role of artificial intelligence in daily life"</a>. <i>Pew Research Center</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">17 September</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Pew+Research+Center&amp;rft.atitle=Growing+public+concern+about+the+role+of+artificial+intelligence+in+daily+life&amp;rft.date=2023-08-28&amp;rft.aulast=Tyson&amp;rft.aufirst=Alec&amp;rft.au=Kikuchi%2C+Emma&amp;rft_id=https%3A%2F%2Fwww.pewresearch.org%2Fshort-reads%2F2023%2F08%2F28%2Fgrowing-public-concern-about-the-role-of-artificial-intelligence-in-daily-life%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-physica_scripta-146"><span class="mw-cite-backlink"><b><a href="#cite_ref-physica_scripta_146-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSotalaYampolskiy2014" class="citation journal cs1">Sotala, Kaj; <a href="/wiki/Roman_Yampolskiy" title="Roman Yampolskiy">Yampolskiy, Roman</a> (19 December 2014). "Responses to catastrophic AGI risk: a survey". <i><a href="/wiki/Physica_Scripta" title="Physica Scripta">Physica Scripta</a></i>. <b>90</b> (1).</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physica+Scripta&amp;rft.atitle=Responses+to+catastrophic+AGI+risk%3A+a+survey&amp;rft.volume=90&amp;rft.issue=1&amp;rft.date=2014-12-19&amp;rft.aulast=Sotala&amp;rft.aufirst=Kaj&amp;rft.au=Yampolskiy%2C+Roman&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-147"><span class="mw-cite-backlink"><b><a href="#cite_ref-147">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBarrettBaum2016" class="citation journal cs1">Barrett, Anthony M.; Baum, Seth D. (23 May 2016). "A model of pathways to artificial superintelligence catastrophe for risk and decision analysis". <i>Journal of Experimental &amp; Theoretical Artificial Intelligence</i>. <b>29</b> (2): 397–414. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1607.07730">1607.07730</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F0952813x.2016.1186228">10.1080/0952813x.2016.1186228</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0952-813X">0952-813X</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:928824">928824</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Experimental+%26+Theoretical+Artificial+Intelligence&amp;rft.atitle=A+model+of+pathways+to+artificial+superintelligence+catastrophe+for+risk+and+decision+analysis&amp;rft.volume=29&amp;rft.issue=2&amp;rft.pages=397-414&amp;rft.date=2016-05-23&amp;rft_id=info%3Aarxiv%2F1607.07730&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A928824%23id-name%3DS2CID&amp;rft.issn=0952-813X&amp;rft_id=info%3Adoi%2F10.1080%2F0952813x.2016.1186228&amp;rft.aulast=Barrett&amp;rft.aufirst=Anthony+M.&amp;rft.au=Baum%2C+Seth+D.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-148"><span class="mw-cite-backlink"><b><a href="#cite_ref-148">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSotalaYampolskiy2014" class="citation journal cs1">Sotala, Kaj; Yampolskiy, Roman V (19 December 2014). <a rel="nofollow" class="external text" href="https://doi.org/10.1088%2F0031-8949%2F90%2F1%2F018001">"Responses to catastrophic AGI risk: a survey"</a>. <i>Physica Scripta</i>. <b>90</b> (1): 018001. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2015PhyS...90a8001S">2015PhyS...90a8001S</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1088%2F0031-8949%2F90%2F1%2F018001">10.1088/0031-8949/90/1/018001</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0031-8949">0031-8949</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:4749656">4749656</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physica+Scripta&amp;rft.atitle=Responses+to+catastrophic+AGI+risk%3A+a+survey&amp;rft.volume=90&amp;rft.issue=1&amp;rft.pages=018001&amp;rft.date=2014-12-19&amp;rft_id=info%3Adoi%2F10.1088%2F0031-8949%2F90%2F1%2F018001&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A4749656%23id-name%3DS2CID&amp;rft.issn=0031-8949&amp;rft_id=info%3Abibcode%2F2015PhyS...90a8001S&amp;rft.aulast=Sotala&amp;rft.aufirst=Kaj&amp;rft.au=Yampolskiy%2C+Roman+V&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1088%252F0031-8949%252F90%252F1%252F018001&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-149"><span class="mw-cite-backlink"><b><a href="#cite_ref-149">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRamamoorthyYampolskiy2018" class="citation journal cs1">Ramamoorthy, Anand; Yampolskiy, Roman (2018). 
<a rel="nofollow" class="external text" href="https://www.itu.int/pub/S-JOURNAL-ICTS.V1I1-2018-9">"Beyond MAD? The race for artificial general intelligence"</a>. <i>ICT Discoveries</i>. <b>1</b> (Special Issue 1). ITU: 1–8. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220107141537/https://www.itu.int/pub/S-JOURNAL-ICTS.V1I1-2018-9">Archived</a> from the original on 7 January 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">7 January</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=ICT+Discoveries&amp;rft.atitle=Beyond+MAD%3F+The+race+for+artificial+general+intelligence&amp;rft.volume=1&amp;rft.issue=Special+Issue+1&amp;rft.pages=1-8&amp;rft.date=2018&amp;rft.aulast=Ramamoorthy&amp;rft.aufirst=Anand&amp;rft.au=Yampolskiy%2C+Roman&amp;rft_id=https%3A%2F%2Fwww.itu.int%2Fpub%2FS-JOURNAL-ICTS.V1I1-2018-9&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-150"><span class="mw-cite-backlink"><b><a href="#cite_ref-150">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCarayannisDraper2022" class="citation journal cs1">Carayannis, Elias G.; Draper, John (11 January 2022). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8748529">"Optimising peace through a Universal Global Peace Treaty to constrain the risk of war from a militarised artificial superintelligence"</a>. <i>AI &amp; Society</i>. <b>38</b> (6): 2679–2692. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs00146-021-01382-y">10.1007/s00146-021-01382-y</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0951-5666">0951-5666</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8748529">8748529</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/35035113">35035113</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:245877737">245877737</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=AI+%26+Society&amp;rft.atitle=Optimising+peace+through+a+Universal+Global+Peace+Treaty+to+constrain+the+risk+of+war+from+a+militarised+artificial+superintelligence&amp;rft.volume=38&amp;rft.issue=6&amp;rft.pages=2679-2692&amp;rft.date=2022-01-11&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC8748529%23id-name%3DPMC&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A245877737%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1007%2Fs00146-021-01382-y&amp;rft.issn=0951-5666&amp;rft_id=info%3Apmid%2F35035113&amp;rft.aulast=Carayannis&amp;rft.aufirst=Elias+G.&amp;rft.au=Draper%2C+John&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC8748529&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-151"><span class="mw-cite-backlink"><b><a href="#cite_ref-151">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCarayannisDraper2023" class="citation cs2">Carayannis, Elias G.; Draper, John (30 May 2023), <a rel="nofollow" class="external text" href="https://www.elgaronline.com/edcollchap/book/9781839109362/book-part-9781839109362-8.xml">"The challenge of advanced cyberwar and the place of cyberpeace"</a>, <i>The Elgar Companion to Digital Transformation, Artificial Intelligence and Innovation in the Economy, Society and Democracy</i>, Edward Elgar Publishing, pp.&#160;32–80, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.4337%2F9781839109362.00008">10.4337/9781839109362.00008</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-1-83910-936-2" title="Special:BookSources/978-1-83910-936-2"><bdi>978-1-83910-936-2</bdi></a><span class="reference-accessdate">, retrieved <span class="nowrap">8 June</span> 2023</span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Elgar+Companion+to+Digital+Transformation%2C+Artificial+Intelligence+and+Innovation+in+the+Economy%2C+Society+and+Democracy&amp;rft.atitle=The+challenge+of+advanced+cyberwar+and+the+place+of+cyberpeace&amp;rft.pages=32-80&amp;rft.date=2023-05-30&amp;rft_id=info%3Adoi%2F10.4337%2F9781839109362.00008&amp;rft.isbn=978-1-83910-936-2&amp;rft.aulast=Carayannis&amp;rft.aufirst=Elias+G.&amp;rft.au=Draper%2C+John&amp;rft_id=https%3A%2F%2Fwww.elgaronline.com%2Fedcollchap%2Fbook%2F9781839109362%2Fbook-part-9781839109362-8.xml&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span>.</span> </li> <li id="cite_note-152"><span class="mw-cite-backlink"><b><a href="#cite_ref-152">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVincent2016" class="citation news cs1">Vincent, James (22 June 2016). 
<a rel="nofollow" class="external text" href="https://www.theverge.com/circuitbreaker/2016/6/22/11999664/google-robots-ai-safety-five-problems">"Google's AI researchers say these are the five key problems for robot safety"</a>. <i>The Verge</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20191224201240/https://www.theverge.com/circuitbreaker/2016/6/22/11999664/google-robots-ai-safety-five-problems">Archived</a> from the original on 24 December 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">5 April</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Verge&amp;rft.atitle=Google%27s+AI+researchers+say+these+are+the+five+key+problems+for+robot+safety&amp;rft.date=2016-06-22&amp;rft.aulast=Vincent&amp;rft.aufirst=James&amp;rft_id=https%3A%2F%2Fwww.theverge.com%2Fcircuitbreaker%2F2016%2F6%2F22%2F11999664%2Fgoogle-robots-ai-safety-five-problems&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-153"><span class="mw-cite-backlink"><b><a href="#cite_ref-153">^</a></b></span> <span class="reference-text">Amodei, Dario, Chris Olah, Jacob Steinhardt, Paul Christiano, John Schulman, and Dan Mané. "Concrete problems in AI safety." arXiv preprint arXiv:1606.06565 (2016).</span> </li> <li id="cite_note-154"><span class="mw-cite-backlink"><b><a href="#cite_ref-154">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJohnson2019" class="citation news cs1">Johnson, Alex (2019). <a rel="nofollow" class="external text" href="https://www.nbcnews.com/mach/tech/elon-musk-wants-hook-your-brain-directly-computers-starting-next-ncna1030631">"Elon Musk wants to hook your brain up directly to computers – starting next year"</a>. <i>NBC News</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200418094146/https://www.nbcnews.com/mach/tech/elon-musk-wants-hook-your-brain-directly-computers-starting-next-ncna1030631">Archived</a> from the original on 18 April 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">5 April</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=NBC+News&amp;rft.atitle=Elon+Musk+wants+to+hook+your+brain+up+directly+to+computers+%E2%80%93+starting+next+year&amp;rft.date=2019&amp;rft.aulast=Johnson&amp;rft.aufirst=Alex&amp;rft_id=https%3A%2F%2Fwww.nbcnews.com%2Fmach%2Ftech%2Felon-musk-wants-hook-your-brain-directly-computers-starting-next-ncna1030631&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-155"><span class="mw-cite-backlink"><b><a href="#cite_ref-155">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTorres2018" class="citation news cs1">Torres, Phil (18 September 2018). <a rel="nofollow" class="external text" href="https://slate.com/technology/2018/09/genetic-engineering-to-stop-doomsday.html">"Only Radically Enhancing Humanity Can Save Us All"</a>. <i>Slate Magazine</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20200806073520/https://slate.com/technology/2018/09/genetic-engineering-to-stop-doomsday.html">Archived</a> from the original on 6 August 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">5 April</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Slate+Magazine&amp;rft.atitle=Only+Radically+Enhancing+Humanity+Can+Save+Us+All&amp;rft.date=2018-09-18&amp;rft.aulast=Torres&amp;rft.aufirst=Phil&amp;rft_id=https%3A%2F%2Fslate.com%2Ftechnology%2F2018%2F09%2Fgenetic-engineering-to-stop-doomsday.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-156"><span class="mw-cite-backlink"><b><a href="#cite_ref-156">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBarrettBaum2016" class="citation journal cs1">Barrett, Anthony M.; Baum, Seth D. (23 May 2016). "A model of pathways to artificial superintelligence catastrophe for risk and decision analysis". <i>Journal of Experimental &amp; Theoretical Artificial Intelligence</i>. <b>29</b> (2): 397–414. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1607.07730">1607.07730</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F0952813X.2016.1186228">10.1080/0952813X.2016.1186228</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:928824">928824</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Experimental+%26+Theoretical+Artificial+Intelligence&amp;rft.atitle=A+model+of+pathways+to+artificial+superintelligence+catastrophe+for+risk+and+decision+analysis&amp;rft.volume=29&amp;rft.issue=2&amp;rft.pages=397-414&amp;rft.date=2016-05-23&amp;rft_id=info%3Aarxiv%2F1607.07730&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A928824%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1080%2F0952813X.2016.1186228&amp;rft.aulast=Barrett&amp;rft.aufirst=Anthony+M.&amp;rft.au=Baum%2C+Seth+D.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-aiamnesia-157"><span class="mw-cite-backlink"><b><a href="#cite_ref-aiamnesia_157-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTkachenko2024" class="citation journal cs1">Tkachenko, Yegor (2024). <a rel="nofollow" class="external text" href="https://proceedings.mlr.press/v235/tkachenko24a.html">"Position: Enforced Amnesia as a Way to Mitigate the Potential Risk of Silent Suffering in the Conscious AI"</a>. <i>Proceedings of the 41st International Conference on Machine Learning</i>. 
PMLR.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+41st+International+Conference+on+Machine+Learning&amp;rft.atitle=Position%3A+Enforced+Amnesia+as+a+Way+to+Mitigate+the+Potential+Risk+of+Silent+Suffering+in+the+Conscious+AI&amp;rft.date=2024&amp;rft.aulast=Tkachenko&amp;rft.aufirst=Yegor&amp;rft_id=https%3A%2F%2Fproceedings.mlr.press%2Fv235%2Ftkachenko24a.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-158"><span class="mw-cite-backlink"><b><a href="#cite_ref-158">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPiper2023" class="citation web cs1">Piper, Kelsey (29 March 2023). <a rel="nofollow" class="external text" href="https://www.vox.com/future-perfect/2023/3/29/23661633/gpt-4-openai-alignment-research-center-open-philanthropy-ai-safety">"How to test what an AI model can – and shouldn't – do"</a>. <i>Vox</i><span class="reference-accessdate">. Retrieved <span class="nowrap">28 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Vox&amp;rft.atitle=How+to+test+what+an+AI+model+can+%E2%80%93+and+shouldn%27t+%E2%80%93+do&amp;rft.date=2023-03-29&amp;rft.aulast=Piper&amp;rft.aufirst=Kelsey&amp;rft_id=https%3A%2F%2Fwww.vox.com%2Ffuture-perfect%2F2023%2F3%2F29%2F23661633%2Fgpt-4-openai-alignment-research-center-open-philanthropy-ai-safety&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-159"><span class="mw-cite-backlink"><b><a href="#cite_ref-159">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPiesing2012" class="citation magazine cs1">Piesing, Mark (17 May 2012). <a rel="nofollow" class="external text" href="https://www.wired.co.uk/news/archive/2012-05/17/the-dangers-of-an-ai-smarter-than-us">"AI uprising: humans will be outsourced, not obliterated"</a>. <i>Wired</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20140407041151/http://www.wired.co.uk/news/archive/2012-05/17/the-dangers-of-an-ai-smarter-than-us">Archived</a> from the original on 7 April 2014<span class="reference-accessdate">. Retrieved <span class="nowrap">12 December</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Wired&amp;rft.atitle=AI+uprising%3A+humans+will+be+outsourced%2C+not+obliterated&amp;rft.date=2012-05-17&amp;rft.aulast=Piesing&amp;rft.aufirst=Mark&amp;rft_id=https%3A%2F%2Fwww.wired.co.uk%2Fnews%2Farchive%2F2012-05%2F17%2Fthe-dangers-of-an-ai-smarter-than-us&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-160"><span class="mw-cite-backlink"><b><a href="#cite_ref-160">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCoughlan2013" class="citation news cs1">Coughlan, Sean (24 April 2013). 
<a rel="nofollow" class="external text" href="https://www.bbc.com/news/business-22002530">"How are humans going to become extinct?"</a>. <i>BBC News</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20140309003706/http://www.bbc.com/news/business-22002530">Archived</a> from the original on 9 March 2014<span class="reference-accessdate">. Retrieved <span class="nowrap">29 March</span> 2014</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BBC+News&amp;rft.atitle=How+are+humans+going+to+become+extinct%3F&amp;rft.date=2013-04-24&amp;rft.aulast=Coughlan&amp;rft.aufirst=Sean&amp;rft_id=https%3A%2F%2Fwww.bbc.com%2Fnews%2Fbusiness-22002530&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-161"><span class="mw-cite-backlink"><b><a href="#cite_ref-161">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBridge2017" class="citation news cs1">Bridge, Mark (10 June 2017). <a rel="nofollow" class="external text" href="https://www.thetimes.co.uk/article/making-robots-less-confident-could-prevent-them-taking-over-gnsblq7lx">"Making robots less confident could prevent them taking over"</a>. <i>The Times</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180321133426/https://www.thetimes.co.uk/article/making-robots-less-confident-could-prevent-them-taking-over-gnsblq7lx">Archived</a> from the original on 21 March 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">21 March</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Times&amp;rft.atitle=Making+robots+less+confident+could+prevent+them+taking+over&amp;rft.date=2017-06-10&amp;rft.aulast=Bridge&amp;rft.aufirst=Mark&amp;rft_id=https%3A%2F%2Fwww.thetimes.co.uk%2Farticle%2Fmaking-robots-less-confident-could-prevent-them-taking-over-gnsblq7lx&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-162"><span class="mw-cite-backlink"><b><a href="#cite_ref-162">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMcGinnis2010" class="citation journal cs1"><a href="/wiki/John_McGinnis" title="John McGinnis">McGinnis, John</a> (Summer 2010). <a rel="nofollow" class="external text" href="http://scholarlycommons.law.northwestern.edu/cgi/viewcontent.cgi?article=1193&amp;context=nulr_online">"Accelerating AI"</a>. <i><a href="/wiki/Northwestern_University_Law_Review" title="Northwestern University Law Review">Northwestern University Law Review</a></i>. <b>104</b> (3): 1253–1270. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160215073656/http://scholarlycommons.law.northwestern.edu/cgi/viewcontent.cgi?article=1193&amp;context=nulr_online">Archived</a> from the original on 15 February 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">16 July</span> 2014</span>. <q>For all these reasons, verifying a global relinquishment treaty, or even one limited to AI-related weapons development, is a nonstarter... 
(For different reasons from ours, the Machine Intelligence Research Institute) considers (AGI) relinquishment infeasible...</q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Northwestern+University+Law+Review&amp;rft.atitle=Accelerating+AI&amp;rft.ssn=summer&amp;rft.volume=104&amp;rft.issue=3&amp;rft.pages=1253-1270&amp;rft.date=2010&amp;rft.aulast=McGinnis&amp;rft.aufirst=John&amp;rft_id=http%3A%2F%2Fscholarlycommons.law.northwestern.edu%2Fcgi%2Fviewcontent.cgi%3Farticle%3D1193%26context%3Dnulr_online&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-163"><span class="mw-cite-backlink"><b><a href="#cite_ref-163">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSotalaYampolskiy2014" class="citation journal cs1">Sotala, Kaj; <a href="/wiki/Roman_Yampolskiy" title="Roman Yampolskiy">Yampolskiy, Roman</a> (19 December 2014). "Responses to catastrophic AGI risk: a survey". <i><a href="/wiki/Physica_Scripta" title="Physica Scripta">Physica Scripta</a></i>. <b>90</b> (1). <q>In general, most writers reject proposals for broad relinquishment... Relinquishment proposals suffer from many of the same problems as regulation proposals, but to a greater extent. There is no historical precedent of general, multi-use technology similar to AGI being successfully relinquished for good, nor do there seem to be any theoretical reasons for believing that relinquishment proposals would work in the future. Therefore we do not consider them to be a viable class of proposals.</q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physica+Scripta&amp;rft.atitle=Responses+to+catastrophic+AGI+risk%3A+a+survey&amp;rft.volume=90&amp;rft.issue=1&amp;rft.date=2014-12-19&amp;rft.aulast=Sotala&amp;rft.aufirst=Kaj&amp;rft.au=Yampolskiy%2C+Roman&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-164"><span class="mw-cite-backlink"><b><a href="#cite_ref-164">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAllenby2016" class="citation news cs1">Allenby, Brad (11 April 2016). <a rel="nofollow" class="external text" href="http://www.slate.com/articles/technology/future_tense/2016/04/why_it_s_a_mistake_to_compare_a_i_with_human_intelligence.html">"The Wrong Cognitive Measuring Stick"</a>. <i>Slate</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160515114003/http://www.slate.com/articles/technology/future_tense/2016/04/why_it_s_a_mistake_to_compare_a_i_with_human_intelligence.html">Archived</a> from the original on 15 May 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">15 May</span> 2016</span>. <q>It is fantasy to suggest that the accelerating development and deployment of technologies that taken together are considered to be A.I. 
will be stopped or limited, either by regulation or even by national legislation.</q></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Slate&amp;rft.atitle=The+Wrong+Cognitive+Measuring+Stick&amp;rft.date=2016-04-11&amp;rft.aulast=Allenby&amp;rft.aufirst=Brad&amp;rft_id=http%3A%2F%2Fwww.slate.com%2Farticles%2Ftechnology%2Ffuture_tense%2F2016%2F04%2Fwhy_it_s_a_mistake_to_compare_a_i_with_human_intelligence.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:7-165"><span class="mw-cite-backlink">^ <a href="#cite_ref-:7_165-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:7_165-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYampolskiy2022" class="citation book cs1">Yampolskiy, Roman V. (2022). <a rel="nofollow" class="external text" href="https://link.springer.com/chapter/10.1007/978-3-031-09153-7_18">"AI Risk Skepticism"</a>. In Müller, Vincent C. (ed.). <i>Philosophy and Theory of Artificial Intelligence 2021</i>. Studies in Applied Philosophy, Epistemology and Rational Ethics. Vol.&#160;63. Cham: Springer International Publishing. pp.&#160;225–248. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-031-09153-7_18">10.1007/978-3-031-09153-7_18</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-3-031-09153-7" title="Special:BookSources/978-3-031-09153-7"><bdi>978-3-031-09153-7</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=AI+Risk+Skepticism&amp;rft.btitle=Philosophy+and+Theory+of+Artificial+Intelligence+2021&amp;rft.place=Cham&amp;rft.series=Studies+in+Applied+Philosophy%2C+Epistemology+and+Rational+Ethics&amp;rft.pages=225-248&amp;rft.pub=Springer+International+Publishing&amp;rft.date=2022&amp;rft_id=info%3Adoi%2F10.1007%2F978-3-031-09153-7_18&amp;rft.isbn=978-3-031-09153-7&amp;rft.aulast=Yampolskiy&amp;rft.aufirst=Roman+V.&amp;rft_id=https%3A%2F%2Flink.springer.com%2Fchapter%2F10.1007%2F978-3-031-09153-7_18&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-166"><span class="mw-cite-backlink"><b><a href="#cite_ref-166">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBaum2018" class="citation journal cs1">Baum, Seth (22 August 2018). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Finfo9090209">"Superintelligence Skepticism as a Political Tool"</a>. <i>Information</i>. <b>9</b> (9): 209. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Finfo9090209">10.3390/info9090209</a></span>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2078-2489">2078-2489</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Information&amp;rft.atitle=Superintelligence+Skepticism+as+a+Political+Tool&amp;rft.volume=9&amp;rft.issue=9&amp;rft.pages=209&amp;rft.date=2018-08-22&amp;rft_id=info%3Adoi%2F10.3390%2Finfo9090209&amp;rft.issn=2078-2489&amp;rft.aulast=Baum&amp;rft.aufirst=Seth&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Finfo9090209&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-167"><span class="mw-cite-backlink"><b><a href="#cite_ref-167">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.cnn.com/2023/03/29/tech/ai-letter-elon-musk-tech-leaders/index.html">"Elon Musk and other tech leaders call for pause in 'out of control' AI race"</a>. <i>CNN</i>. 29 March 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">30 March</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=CNN&amp;rft.atitle=Elon+Musk+and+other+tech+leaders+call+for+pause+in+%27out+of+control%27+AI+race&amp;rft.date=2023-03-29&amp;rft_id=https%3A%2F%2Fwww.cnn.com%2F2023%2F03%2F29%2Ftech%2Fai-letter-elon-musk-tech-leaders%2Findex.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-168"><span class="mw-cite-backlink"><b><a href="#cite_ref-168">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://venturebeat.com/ai/open-letter-calling-for-ai-pause-shines-light-on-fierce-debate-around-risks-vs-hype/">"Open letter calling for AI 'pause' shines light on fierce debate around risks vs. hype"</a>. <i>VentureBeat</i>. 29 March 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">20 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=VentureBeat&amp;rft.atitle=Open+letter+calling+for+AI+%27pause%27+shines+light+on+fierce+debate+around+risks+vs.+hype&amp;rft.date=2023-03-29&amp;rft_id=https%3A%2F%2Fventurebeat.com%2Fai%2Fopen-letter-calling-for-ai-pause-shines-light-on-fierce-debate-around-risks-vs-hype%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-169"><span class="mw-cite-backlink"><b><a href="#cite_ref-169">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVincent2023" class="citation web cs1">Vincent, James (14 April 2023). <a rel="nofollow" class="external text" href="https://www.theverge.com/2023/4/14/23683084/openai-gpt-5-rumors-training-sam-altman">"OpenAI's CEO confirms the company isn't training GPT-5 and "won't for some time"<span class="cs1-kern-right"></span>"</a>. 
<i>The Verge</i><span class="reference-accessdate">. Retrieved <span class="nowrap">20 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Verge&amp;rft.atitle=OpenAI%27s+CEO+confirms+the+company+isn%27t+training+GPT-5+and+%22won%27t+for+some+time%22&amp;rft.date=2023-04-14&amp;rft.aulast=Vincent&amp;rft.aufirst=James&amp;rft_id=https%3A%2F%2Fwww.theverge.com%2F2023%2F4%2F14%2F23683084%2Fopenai-gpt-5-rumors-training-sam-altman&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-170"><span class="mw-cite-backlink"><b><a href="#cite_ref-170">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation magazine cs1"><a rel="nofollow" class="external text" href="https://time.com/6266923/ai-eliezer-yudkowsky-open-letter-not-enough/">"The Open Letter on AI Doesn't Go Far Enough"</a>. <i>Time</i>. 29 March 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">20 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Time&amp;rft.atitle=The+Open+Letter+on+AI+Doesn%27t+Go+Far+Enough&amp;rft.date=2023-03-29&amp;rft_id=https%3A%2F%2Ftime.com%2F6266923%2Fai-eliezer-yudkowsky-open-letter-not-enough%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-171"><span class="mw-cite-backlink"><b><a href="#cite_ref-171">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDomonoske2017" class="citation news cs1">Domonoske, Camila (17 July 2017). <a rel="nofollow" class="external text" href="https://www.npr.org/sections/thetwo-way/2017/07/17/537686649/elon-musk-warns-governors-artificial-intelligence-poses-existential-risk">"Elon Musk Warns Governors: Artificial Intelligence Poses 'Existential Risk'<span class="cs1-kern-right"></span>"</a>. <i>NPR</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200423135755/https://www.npr.org/sections/thetwo-way/2017/07/17/537686649/elon-musk-warns-governors-artificial-intelligence-poses-existential-risk">Archived</a> from the original on 23 April 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=NPR&amp;rft.atitle=Elon+Musk+Warns+Governors%3A+Artificial+Intelligence+Poses+%27Existential+Risk%27&amp;rft.date=2017-07-17&amp;rft.aulast=Domonoske&amp;rft.aufirst=Camila&amp;rft_id=https%3A%2F%2Fwww.npr.org%2Fsections%2Fthetwo-way%2F2017%2F07%2F17%2F537686649%2Felon-musk-warns-governors-artificial-intelligence-poses-existential-risk&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-172"><span class="mw-cite-backlink"><b><a href="#cite_ref-172">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGibbs2017" class="citation news cs1">Gibbs, Samuel (17 July 2017). 
<a rel="nofollow" class="external text" href="https://www.theguardian.com/technology/2017/jul/17/elon-musk-regulation-ai-combat-existential-threat-tesla-spacex-ceo">"Elon Musk: regulate AI to combat 'existential threat' before it's too late"</a>. <i>The Guardian</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200606072024/https://www.theguardian.com/technology/2017/jul/17/elon-musk-regulation-ai-combat-existential-threat-tesla-spacex-ceo">Archived</a> from the original on 6 June 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Guardian&amp;rft.atitle=Elon+Musk%3A+regulate+AI+to+combat+%27existential+threat%27+before+it%27s+too+late&amp;rft.date=2017-07-17&amp;rft.aulast=Gibbs&amp;rft.aufirst=Samuel&amp;rft_id=https%3A%2F%2Fwww.theguardian.com%2Ftechnology%2F2017%2Fjul%2F17%2Felon-musk-regulation-ai-combat-existential-threat-tesla-spacex-ceo&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-cnbc2-173"><span class="mw-cite-backlink"><b><a href="#cite_ref-cnbc2_173-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKharpal2017" class="citation news cs1">Kharpal, Arjun (7 November 2017). <a rel="nofollow" class="external text" href="https://www.cnbc.com/2017/11/07/ai-infancy-and-too-early-to-regulate-intel-ceo-brian-krzanich-says.html">"A.I. is in its 'infancy' and it's too early to regulate it, Intel CEO Brian Krzanich says"</a>. <i>CNBC</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200322115325/https://www.cnbc.com/2017/11/07/ai-infancy-and-too-early-to-regulate-intel-ceo-brian-krzanich-says.html">Archived</a> from the original on 22 March 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">27 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=CNBC&amp;rft.atitle=A.I.+is+in+its+%27infancy%27+and+it%27s+too+early+to+regulate+it%2C+Intel+CEO+Brian+Krzanich+says&amp;rft.date=2017-11-07&amp;rft.aulast=Kharpal&amp;rft.aufirst=Arjun&amp;rft_id=https%3A%2F%2Fwww.cnbc.com%2F2017%2F11%2F07%2Fai-infancy-and-too-early-to-regulate-intel-ceo-brian-krzanich-says.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-174"><span class="mw-cite-backlink"><b><a href="#cite_ref-174">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDawes2021" class="citation web cs1">Dawes, James (20 December 2021). <a rel="nofollow" class="external text" href="https://theconversation.com/un-fails-to-agree-on-killer-robot-ban-as-nations-pour-billions-into-autonomous-weapons-research-173616">"UN fails to agree on 'killer robot' ban as nations pour billions into autonomous weapons research"</a>. <i>The Conversation</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">28 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Conversation&amp;rft.atitle=UN+fails+to+agree+on+%27killer+robot%27+ban+as+nations+pour+billions+into+autonomous+weapons+research&amp;rft.date=2021-12-20&amp;rft.aulast=Dawes&amp;rft.aufirst=James&amp;rft_id=http%3A%2F%2Ftheconversation.com%2Fun-fails-to-agree-on-killer-robot-ban-as-nations-pour-billions-into-autonomous-weapons-research-173616&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:13-175"><span class="mw-cite-backlink">^ <a href="#cite_ref-:13_175-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:13_175-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFassihi2023" class="citation news cs1">Fassihi, Farnaz (18 July 2023). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2023/07/18/world/un-security-council-ai.html">"U.N. Officials Urge Regulation of Artificial Intelligence"</a>. <i>The New York Times</i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0362-4331">0362-4331</a><span class="reference-accessdate">. Retrieved <span class="nowrap">20 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+New+York+Times&amp;rft.atitle=U.N.+Officials+Urge+Regulation+of+Artificial+Intelligence&amp;rft.date=2023-07-18&amp;rft.issn=0362-4331&amp;rft.aulast=Fassihi&amp;rft.aufirst=Farnaz&amp;rft_id=https%3A%2F%2Fwww.nytimes.com%2F2023%2F07%2F18%2Fworld%2Fun-security-council-ai.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-176"><span class="mw-cite-backlink"><b><a href="#cite_ref-176">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://press.un.org/en/2023/sc15359.doc.htm">"International Community Must Urgently Confront New Reality of Generative, Artificial Intelligence, Speakers Stress as Security Council Debates Risks, Rewards"</a>. <i>United Nations</i><span class="reference-accessdate">. Retrieved <span class="nowrap">20 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=United+Nations&amp;rft.atitle=International+Community+Must+Urgently+Confront+New+Reality+of+Generative%2C+Artificial+Intelligence%2C+Speakers+Stress+as+Security+Council+Debates+Risks%2C+Rewards&amp;rft_id=https%3A%2F%2Fpress.un.org%2Fen%2F2023%2Fsc15359.doc.htm&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:532-177"><span class="mw-cite-backlink"><b><a href="#cite_ref-:532_177-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSotalaYampolskiy2014" class="citation journal cs1">Sotala, Kaj; Yampolskiy, Roman V. 
(19 December 2014). <a rel="nofollow" class="external text" href="https://doi.org/10.1088%2F0031-8949%2F90%2F1%2F018001">"Responses to catastrophic AGI risk: a survey"</a>. <i>Physica Scripta</i>. <b>90</b> (1): 018001. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2015PhyS...90a8001S">2015PhyS...90a8001S</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1088%2F0031-8949%2F90%2F1%2F018001">10.1088/0031-8949/90/1/018001</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0031-8949">0031-8949</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physica+Scripta&amp;rft.atitle=Responses+to+catastrophic+AGI+risk%3A+a+survey&amp;rft.volume=90&amp;rft.issue=1&amp;rft.pages=018001&amp;rft.date=2014-12-19&amp;rft.issn=0031-8949&amp;rft_id=info%3Adoi%2F10.1088%2F0031-8949%2F90%2F1%2F018001&amp;rft_id=info%3Abibcode%2F2015PhyS...90a8001S&amp;rft.aulast=Sotala&amp;rft.aufirst=Kaj&amp;rft.au=Yampolskiy%2C+Roman+V.&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1088%252F0031-8949%252F90%252F1%252F018001&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-178"><span class="mw-cite-backlink"><b><a href="#cite_ref-178">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGeist2016" class="citation journal cs1">Geist, Edward Moore (15 August 2016). "It's already too late to stop the AI arms race—We must manage it instead". <i>Bulletin of the Atomic Scientists</i>. <b>72</b> (5): 318–321. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2016BuAtS..72e.318G">2016BuAtS..72e.318G</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F00963402.2016.1216672">10.1080/00963402.2016.1216672</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0096-3402">0096-3402</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:151967826">151967826</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Bulletin+of+the+Atomic+Scientists&amp;rft.atitle=It%27s+already+too+late+to+stop+the+AI+arms+race%E2%80%94We+must+manage+it+instead&amp;rft.volume=72&amp;rft.issue=5&amp;rft.pages=318-321&amp;rft.date=2016-08-15&amp;rft_id=info%3Adoi%2F10.1080%2F00963402.2016.1216672&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A151967826%23id-name%3DS2CID&amp;rft.issn=0096-3402&amp;rft_id=info%3Abibcode%2F2016BuAtS..72e.318G&amp;rft.aulast=Geist&amp;rft.aufirst=Edward+Moore&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-179"><span class="mw-cite-backlink"><b><a href="#cite_ref-179">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://apnews.com/article/artificial-intelligence-safeguards-joe-biden-kamala-harris-4caf02b94275429f764b06840897436c">"Amazon, Google, Meta, Microsoft and other tech firms agree to AI safeguards set by the White House"</a>. <i>AP News</i>. 21 July 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">21 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=AP+News&amp;rft.atitle=Amazon%2C+Google%2C+Meta%2C+Microsoft+and+other+tech+firms+agree+to+AI+safeguards+set+by+the+White+House&amp;rft.date=2023-07-21&amp;rft_id=https%3A%2F%2Fapnews.com%2Farticle%2Fartificial-intelligence-safeguards-joe-biden-kamala-harris-4caf02b94275429f764b06840897436c&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-180"><span class="mw-cite-backlink"><b><a href="#cite_ref-180">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.redditchadvertiser.co.uk/news/national/23670894.amazon-google-meta-microsoft-firms-agree-ai-safeguards/">"Amazon, Google, Meta, Microsoft and other firms agree to AI safeguards"</a>. <i>Redditch Advertiser</i>. 21 July 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">21 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Redditch+Advertiser&amp;rft.atitle=Amazon%2C+Google%2C+Meta%2C+Microsoft+and+other+firms+agree+to+AI+safeguards&amp;rft.date=2023-07-21&amp;rft_id=https%3A%2F%2Fwww.redditchadvertiser.co.uk%2Fnews%2Fnational%2F23670894.amazon-google-meta-microsoft-firms-agree-ai-safeguards%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-181"><span class="mw-cite-backlink"><b><a href="#cite_ref-181">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFThe_White_House2023" class="citation web cs1">The White House (30 October 2023). <a rel="nofollow" class="external text" href="https://www.whitehouse.gov/briefing-room/presidential-actions/2023/10/30/executive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence/">"Executive Order on the Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence"</a>. <i>The White House</i><span class="reference-accessdate">. Retrieved <span class="nowrap">19 December</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+White+House&amp;rft.atitle=Executive+Order+on+the+Safe%2C+Secure%2C+and+Trustworthy+Development+and+Use+of+Artificial+Intelligence&amp;rft.date=2023-10-30&amp;rft.au=The+White+House&amp;rft_id=https%3A%2F%2Fwww.whitehouse.gov%2Fbriefing-room%2Fpresidential-actions%2F2023%2F10%2F30%2Fexecutive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></span> </li> </ol></div></div> <div class="mw-heading mw-heading2"><h2 id="Bibliography">Bibliography</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;action=edit&amp;section=39" title="Edit section: Bibliography"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFClark2015a" class="citation news cs1">Clark, Jack (2015a). <span class="id-lock-subscription" title="Paid subscription required"><a rel="nofollow" class="external text" href="https://www.bloomberg.com/news/articles/2015-07-01/musk-backed-group-probes-risks-behind-artificial-intelligence">"Musk-Backed Group Probes Risks Behind Artificial Intelligence"</a></span>. <i>Bloomberg.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151030202356/http://www.bloomberg.com/news/articles/2015-07-01/musk-backed-group-probes-risks-behind-artificial-intelligence">Archived</a> from the original on 30 October 2015<span class="reference-accessdate">. 
Retrieved <span class="nowrap">30 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Bloomberg.com&amp;rft.atitle=Musk-Backed+Group+Probes+Risks+Behind+Artificial+Intelligence&amp;rft.date=2015&amp;rft.aulast=Clark&amp;rft.aufirst=Jack&amp;rft_id=https%3A%2F%2Fwww.bloomberg.com%2Fnews%2Farticles%2F2015-07-01%2Fmusk-backed-group-probes-risks-behind-artificial-intelligence&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AExistential+risk+from+artificial+intelligence" class="Z3988"></span></li></ul> <div style="clear:both;" class=""></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output .navbox-group,.mw-parser-output .navbox-subgroup .navbox-title{background-color:#ddf}.mw-parser-output .navbox-subgroup .navbox-group,.mw-parser-output .navbox-subgroup .navbox-abovebelow{background-color:#e6e6ff}.mw-parser-output .navbox-even{background-color:#f7f7f7}.mw-parser-output .navbox-odd{background-color:transparent}.mw-parser-output .navbox .hlist td dl,.mw-parser-output .navbox .hlist td ol,.mw-parser-output .navbox .hlist td ul,.mw-parser-output .navbox td.hlist dl,.mw-parser-output .navbox td.hlist ol,.mw-parser-output .navbox td.hlist ul{padding:0.125em 0}.mw-parser-output .navbox .navbar{display:block;font-size:100%}.mw-parser-output .navbox-title .navbar{float:left;text-align:left;margin-right:0.5em}body.skin--responsive .mw-parser-output .navbox-image img{max-width:none!important}@media print{body.ns-0 .mw-parser-output .navbox{display:none!important}}</style></div><div role="navigation" class="navbox" aria-labelledby="Existential_risk_from_artificial_intelligence" style="padding:3px"><table class="nowraplinks mw-collapsible expanded navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Existential_risk_from_artificial_intelligence" title="Template:Existential risk from artificial intelligence"><abbr 
title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Existential_risk_from_artificial_intelligence" title="Template talk:Existential risk from artificial intelligence"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Existential_risk_from_artificial_intelligence" title="Special:EditPage/Template:Existential risk from artificial intelligence"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Existential_risk_from_artificial_intelligence" style="font-size:114%;margin:0 4em"><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk</a> from <a href="/wiki/Artificial_intelligence" title="Artificial intelligence">artificial intelligence</a></div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%">Concepts</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Artificial_general_intelligence" title="Artificial general intelligence">AGI</a></li> <li><a href="/wiki/AI_alignment" title="AI alignment">AI alignment</a></li> <li><a href="/wiki/AI_capability_control" title="AI capability control">AI capability control</a></li> <li><a href="/wiki/AI_safety" title="AI safety">AI safety</a></li> <li><a href="/wiki/AI_takeover" title="AI takeover">AI takeover</a></li> <li><a href="/wiki/Consequentialism" title="Consequentialism">Consequentialism</a></li> <li><a href="/wiki/Effective_accelerationism" title="Effective accelerationism">Effective accelerationism</a></li> <li><a href="/wiki/Ethics_of_artificial_intelligence" title="Ethics of artificial intelligence">Ethics of artificial intelligence</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk from artificial general intelligence</a></li> <li><a href="/wiki/Friendly_artificial_intelligence" title="Friendly artificial intelligence">Friendly artificial intelligence</a></li> <li><a href="/wiki/Instrumental_convergence" title="Instrumental convergence">Instrumental convergence</a></li> <li><a href="/wiki/Intelligence_explosion" class="mw-redirect" title="Intelligence explosion">Intelligence explosion</a></li> <li><a href="/wiki/Longtermism" title="Longtermism">Longtermism</a></li> <li><a href="/wiki/Machine_ethics" title="Machine ethics">Machine ethics</a></li> <li><a href="/wiki/Suffering_risks" class="mw-redirect" title="Suffering risks">Suffering risks</a></li> <li><a href="/wiki/Superintelligence" title="Superintelligence">Superintelligence</a></li> <li><a href="/wiki/Technological_singularity" title="Technological singularity">Technological singularity</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Organizations</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Alignment_Research_Center" title="Alignment Research Center">Alignment Research Center</a></li> <li><a href="/wiki/Center_for_AI_Safety" title="Center for AI Safety">Center for AI Safety</a></li> <li><a href="/wiki/Center_for_Applied_Rationality" title="Center for Applied Rationality">Center for Applied Rationality</a></li> <li><a 
href="/wiki/Center_for_Human-Compatible_Artificial_Intelligence" title="Center for Human-Compatible Artificial Intelligence">Center for Human-Compatible Artificial Intelligence</a></li> <li><a href="/wiki/Centre_for_the_Study_of_Existential_Risk" title="Centre for the Study of Existential Risk">Centre for the Study of Existential Risk</a></li> <li><a href="/wiki/EleutherAI" title="EleutherAI">EleutherAI</a></li> <li><a href="/wiki/Future_of_Humanity_Institute" title="Future of Humanity Institute">Future of Humanity Institute</a></li> <li><a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a></li> <li><a href="/wiki/Google_DeepMind" title="Google DeepMind">Google DeepMind</a></li> <li><a href="/wiki/Humanity%2B" title="Humanity+">Humanity+</a></li> <li><a href="/wiki/Institute_for_Ethics_and_Emerging_Technologies" title="Institute for Ethics and Emerging Technologies">Institute for Ethics and Emerging Technologies</a></li> <li><a href="/wiki/Leverhulme_Centre_for_the_Future_of_Intelligence" title="Leverhulme Centre for the Future of Intelligence">Leverhulme Centre for the Future of Intelligence</a></li> <li><a href="/wiki/Machine_Intelligence_Research_Institute" title="Machine Intelligence Research Institute">Machine Intelligence Research Institute</a></li> <li><a href="/wiki/OpenAI" title="OpenAI">OpenAI</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">People</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Slate_Star_Codex" title="Slate Star Codex">Scott Alexander</a></li> <li><a href="/wiki/Sam_Altman" title="Sam Altman">Sam Altman</a></li> <li><a href="/wiki/Yoshua_Bengio" title="Yoshua Bengio">Yoshua Bengio</a></li> <li><a href="/wiki/Nick_Bostrom" title="Nick Bostrom">Nick Bostrom</a></li> <li><a href="/wiki/Paul_Christiano_(researcher)" title="Paul Christiano (researcher)">Paul Christiano</a></li> <li><a href="/wiki/K._Eric_Drexler" title="K. Eric Drexler">Eric Drexler</a></li> <li><a href="/wiki/Sam_Harris" title="Sam Harris">Sam Harris</a></li> <li><a href="/wiki/Stephen_Hawking" title="Stephen Hawking">Stephen Hawking</a></li> <li><a href="/wiki/Dan_Hendrycks" title="Dan Hendrycks">Dan Hendrycks</a></li> <li><a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a></li> <li><a href="/wiki/Bill_Joy" title="Bill Joy">Bill Joy</a></li> <li><a href="/wiki/Shane_Legg" title="Shane Legg">Shane Legg</a></li> <li><a href="/wiki/Elon_Musk" title="Elon Musk">Elon Musk</a></li> <li><a href="/wiki/Steve_Omohundro" title="Steve Omohundro">Steve Omohundro</a></li> <li><a href="/wiki/Huw_Price" title="Huw Price">Huw Price</a></li> <li><a href="/wiki/Martin_Rees" title="Martin Rees">Martin Rees</a></li> <li><a href="/wiki/Stuart_J._Russell" title="Stuart J. Russell">Stuart J. 
Russell</a></li> <li><a href="/wiki/Jaan_Tallinn" title="Jaan Tallinn">Jaan Tallinn</a></li> <li><a href="/wiki/Max_Tegmark" title="Max Tegmark">Max Tegmark</a></li> <li><a href="/wiki/Frank_Wilczek" title="Frank Wilczek">Frank Wilczek</a></li> <li><a href="/wiki/Roman_Yampolskiy" title="Roman Yampolskiy">Roman Yampolskiy</a></li> <li><a href="/wiki/Eliezer_Yudkowsky" title="Eliezer Yudkowsky">Eliezer Yudkowsky</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Other</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Statement_on_AI_risk_of_extinction" title="Statement on AI risk of extinction">Statement on AI risk of extinction</a></li> <li><i><a href="/wiki/Human_Compatible" title="Human Compatible">Human Compatible</a></i></li> <li><a href="/wiki/Open_letter_on_artificial_intelligence_(2015)" title="Open letter on artificial intelligence (2015)">Open letter on artificial intelligence (2015)</a></li> <li><i><a href="/wiki/Our_Final_Invention" title="Our Final Invention">Our Final Invention</a></i></li> <li><i><a href="/wiki/The_Precipice:_Existential_Risk_and_the_Future_of_Humanity" title="The Precipice: Existential Risk and the Future of Humanity">The Precipice</a></i></li> <li><i><a href="/wiki/Superintelligence:_Paths,_Dangers,_Strategies" title="Superintelligence: Paths, Dangers, Strategies">Superintelligence: Paths, Dangers, Strategies</a></i></li> <li><i><a href="/wiki/Do_You_Trust_This_Computer%3F" title="Do You Trust This Computer?">Do You Trust This Computer?</a></i></li> <li><a href="/wiki/Artificial_Intelligence_Act" title="Artificial Intelligence Act">Artificial Intelligence Act</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> <a href="/wiki/Category:Existential_risk_from_artificial_general_intelligence" title="Category:Existential risk from artificial general intelligence">Category</a></div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"></div><div role="navigation" class="navbox" aria-labelledby="Effective_altruism" style="padding:3px"><table class="nowraplinks mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Effective_altruism" title="Template:Effective altruism"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Effective_altruism" title="Template talk:Effective 
altruism"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Effective_altruism" title="Special:EditPage/Template:Effective altruism"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Effective_altruism" style="font-size:114%;margin:0 4em"><a href="/wiki/Effective_altruism" title="Effective altruism">Effective altruism</a></div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%">Concepts</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Aid_effectiveness" title="Aid effectiveness">Aid effectiveness</a></li> <li><a href="/wiki/Charity_assessment" title="Charity assessment">Charity assessment</a></li> <li><a href="/wiki/Demandingness_objection" title="Demandingness objection">Demandingness objection</a></li> <li><a href="/wiki/Disability-adjusted_life_year" title="Disability-adjusted life year">Disability-adjusted life year</a></li> <li><a href="/wiki/Disease_burden" title="Disease burden">Disease burden</a></li> <li><a href="/wiki/Distributional_cost-effectiveness_analysis" title="Distributional cost-effectiveness analysis">Distributional cost-effectiveness analysis</a></li> <li><a href="/wiki/Earning_to_give" title="Earning to give">Earning to give</a></li> <li><a href="/wiki/Equal_consideration_of_interests" title="Equal consideration of interests">Equal consideration of interests</a></li> <li><a href="/wiki/Longtermism" title="Longtermism">Longtermism</a></li> <li><a href="/wiki/Marginal_utility" title="Marginal utility">Marginal utility</a></li> <li><a href="/wiki/Moral_circle_expansion" title="Moral circle expansion">Moral circle expansion</a></li> <li><a href="/wiki/Psychological_barriers_to_effective_altruism" title="Psychological barriers to effective altruism">Psychological barriers to effective altruism</a></li> <li><a href="/wiki/Quality-adjusted_life_year" title="Quality-adjusted life year">Quality-adjusted life year</a></li> <li><a href="/wiki/Utilitarianism" title="Utilitarianism">Utilitarianism</a></li> <li><a href="/wiki/Venture_philanthropy" title="Venture philanthropy">Venture philanthropy</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Key figures</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Sam_Bankman-Fried" title="Sam Bankman-Fried">Sam Bankman-Fried</a></li> <li><a href="/wiki/Liv_Boeree" title="Liv Boeree">Liv Boeree</a></li> <li><a href="/wiki/Nick_Bostrom" title="Nick Bostrom">Nick Bostrom</a></li> <li><a href="/wiki/Hilary_Greaves" title="Hilary Greaves">Hilary Greaves</a></li> <li><a href="/wiki/Holden_Karnofsky" title="Holden Karnofsky">Holden Karnofsky</a></li> <li><a href="/wiki/William_MacAskill" title="William MacAskill">William MacAskill</a></li> <li><a href="/wiki/Dustin_Moskovitz" title="Dustin Moskovitz">Dustin Moskovitz</a></li> <li><a href="/wiki/Yew-Kwang_Ng" title="Yew-Kwang Ng">Yew-Kwang Ng</a></li> <li><a href="/wiki/Toby_Ord" title="Toby Ord">Toby Ord</a></li> <li><a href="/wiki/Derek_Parfit" title="Derek Parfit">Derek Parfit</a></li> <li><a href="/wiki/Peter_Singer" title="Peter Singer">Peter Singer</a></li> <li><a href="/wiki/Cari_Tuna" title="Cari Tuna">Cari Tuna</a></li> <li><a href="/wiki/Eliezer_Yudkowsky" title="Eliezer Yudkowsky">Eliezer Yudkowsky</a></li></ul> </div></td></tr><tr><th scope="row" 
class="navbox-group" style="width:1%">Organizations</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/80,000_Hours" title="80,000 Hours">80,000 Hours</a></li> <li><a href="/wiki/Against_Malaria_Foundation" title="Against Malaria Foundation">Against Malaria Foundation</a></li> <li><a href="/wiki/Animal_Charity_Evaluators" title="Animal Charity Evaluators">Animal Charity Evaluators</a></li> <li><a href="/wiki/Animal_Ethics_(organization)" title="Animal Ethics (organization)">Animal Ethics</a></li> <li><a href="/wiki/Centre_for_Effective_Altruism" title="Centre for Effective Altruism">Centre for Effective Altruism</a></li> <li><a href="/wiki/Centre_for_Enabling_EA_Learning_%26_Research" title="Centre for Enabling EA Learning &amp; Research">Centre for Enabling EA Learning &amp; Research</a></li> <li><a href="/wiki/Center_for_High_Impact_Philanthropy" title="Center for High Impact Philanthropy">Center for High Impact Philanthropy</a></li> <li><a href="/wiki/Centre_for_the_Study_of_Existential_Risk" title="Centre for the Study of Existential Risk">Centre for the Study of Existential Risk</a></li> <li><a href="/wiki/Development_Media_International" title="Development Media International">Development Media International</a></li> <li><a href="/wiki/Evidence_Action" title="Evidence Action">Evidence Action</a></li> <li><a href="/wiki/Faunalytics" title="Faunalytics">Faunalytics</a></li> <li><a href="/wiki/Fistula_Foundation" title="Fistula Foundation">Fistula Foundation</a></li> <li><a href="/wiki/Future_of_Humanity_Institute" title="Future of Humanity Institute">Future of Humanity Institute</a></li> <li><a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a></li> <li><a href="/wiki/Founders_Pledge" title="Founders Pledge">Founders Pledge</a></li> <li><a href="/wiki/GiveDirectly" title="GiveDirectly">GiveDirectly</a></li> <li><a href="/wiki/GiveWell" title="GiveWell">GiveWell</a></li> <li><a href="/wiki/Giving_Multiplier" title="Giving Multiplier">Giving Multiplier</a></li> <li><a href="/wiki/Giving_What_We_Can" title="Giving What We Can">Giving What We Can</a></li> <li><a href="/wiki/Good_Food_Fund" title="Good Food Fund">Good Food Fund</a></li> <li><a href="/wiki/The_Good_Food_Institute" title="The Good Food Institute">The Good Food Institute</a></li> <li><a href="/wiki/Good_Ventures" title="Good Ventures">Good Ventures</a></li> <li><a href="/wiki/The_Humane_League" title="The Humane League">The Humane League</a></li> <li><a href="/wiki/Mercy_for_Animals" title="Mercy for Animals">Mercy for Animals</a></li> <li><a href="/wiki/Machine_Intelligence_Research_Institute" title="Machine Intelligence Research Institute">Machine Intelligence Research Institute</a></li> <li><a href="/wiki/Malaria_Consortium" title="Malaria Consortium">Malaria Consortium</a></li> <li><a href="/wiki/Nuclear_Threat_Initiative" title="Nuclear Threat Initiative">Nuclear Threat Initiative</a></li> <li><a href="/wiki/Open_Philanthropy_(organization)" class="mw-redirect" title="Open Philanthropy (organization)">Open Philanthropy</a></li> <li><a href="/wiki/Raising_for_Effective_Giving" title="Raising for Effective Giving">Raising for Effective Giving</a></li> <li><a href="/wiki/Sentience_Institute" title="Sentience Institute">Sentience Institute</a></li> <li><a href="/wiki/Unlimit_Health" title="Unlimit Health">Unlimit Health</a></li> <li><a href="/wiki/Wild_Animal_Initiative" 
title="Wild Animal Initiative">Wild Animal Initiative</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Focus areas</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Biotechnology_risk" title="Biotechnology risk">Biotechnology risk</a></li> <li><a href="/wiki/Climate_change" title="Climate change">Climate change</a></li> <li><a href="/wiki/Cultured_meat" title="Cultured meat">Cultured meat</a></li> <li><a href="/wiki/Economic_stability" title="Economic stability">Economic stability</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk from artificial general intelligence</a></li> <li><a href="/wiki/Global_catastrophic_risk" title="Global catastrophic risk">Global catastrophic risk</a></li> <li><a href="/wiki/Global_health" title="Global health">Global health</a></li> <li><a href="/wiki/Global_poverty" class="mw-redirect" title="Global poverty">Global poverty</a></li> <li><a href="/wiki/Intensive_animal_farming" title="Intensive animal farming">Intensive animal farming</a></li> <li><a href="/wiki/Land_use" title="Land use">Land use reform</a></li> <li><a href="/wiki/Life_extension" title="Life extension">Life extension</a></li> <li><a href="/wiki/Malaria_prevention" class="mw-redirect" title="Malaria prevention">Malaria prevention</a></li> <li><a href="/wiki/Mass_deworming" title="Mass deworming">Mass deworming</a></li> <li><a href="/wiki/Neglected_tropical_diseases" title="Neglected tropical diseases">Neglected tropical diseases</a></li> <li><a href="/wiki/Risk_of_astronomical_suffering" title="Risk of astronomical suffering">Risk of astronomical suffering</a></li> <li><a href="/wiki/Wild_animal_suffering" title="Wild animal suffering">Wild animal suffering</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Literature</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><i><a href="/wiki/Doing_Good_Better" title="Doing Good Better">Doing Good Better</a></i></li> <li><i><a href="/wiki/The_End_of_Animal_Farming" title="The End of Animal Farming">The End of Animal Farming</a></i></li> <li><i><a href="/wiki/Famine,_Affluence,_and_Morality" title="Famine, Affluence, and Morality">Famine, Affluence, and Morality</a></i></li> <li><i><a href="/wiki/The_Life_You_Can_Save" title="The Life You Can Save">The Life You Can Save</a></i></li> <li><i><a href="/wiki/Living_High_and_Letting_Die" title="Living High and Letting Die">Living High and Letting Die</a></i></li> <li><i><a href="/wiki/The_Most_Good_You_Can_Do" title="The Most Good You Can Do">The Most Good You Can Do</a></i></li> <li><i><a href="/wiki/Practical_Ethics" title="Practical Ethics">Practical Ethics</a></i></li> <li><i><a href="/wiki/The_Precipice:_Existential_Risk_and_the_Future_of_Humanity" title="The Precipice: Existential Risk and the Future of Humanity">The Precipice</a></i></li> <li><i><a href="/wiki/Superintelligence:_Paths,_Dangers,_Strategies" title="Superintelligence: Paths, Dangers, Strategies">Superintelligence: Paths, Dangers, Strategies</a></i></li> <li><i><a href="/wiki/What_We_Owe_the_Future" title="What We Owe the Future">What We Owe the Future</a></i></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" 
style="width:1%">Events</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Effective_Altruism_Global" title="Effective Altruism Global">Effective Altruism Global</a></li></ul> </div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"></div><div role="navigation" class="navbox" aria-labelledby="Global_catastrophic_risks" style="padding:3px"><table class="nowraplinks hlist mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Global_catastrophic_risks" title="Template:Global catastrophic risks"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Global_catastrophic_risks" title="Template talk:Global catastrophic risks"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Global_catastrophic_risks" title="Special:EditPage/Template:Global catastrophic risks"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Global_catastrophic_risks" style="font-size:114%;margin:0 4em"><a href="/wiki/Global_catastrophic_risk" title="Global catastrophic risk">Global catastrophic risks</a></div></th></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><a href="/wiki/Future_of_Earth" title="Future of Earth">Future of the Earth</a></li> <li><a href="/wiki/Future_of_an_expanding_universe" title="Future of an expanding universe">Future of an expanding universe</a> <ul><li><a href="/wiki/Ultimate_fate_of_the_universe" title="Ultimate fate of the universe">Ultimate fate of the universe</a></li></ul></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Technological</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Chemical_warfare" title="Chemical warfare">Chemical warfare</a></li> <li><a href="/wiki/Cyberattack" title="Cyberattack">Cyberattack</a> <ul><li><a href="/wiki/Cyberwarfare" title="Cyberwarfare">Cyberwarfare</a></li> <li><a href="/wiki/Cyberterrorism" title="Cyberterrorism">Cyberterrorism</a></li> <li><a href="/wiki/Cybergeddon" title="Cybergeddon">Cybergeddon</a></li></ul></li> <li><a href="/wiki/Gray_goo" title="Gray goo">Gray goo</a></li> <li><a href="/wiki/Industrial_applications_of_nanotechnology#Weapons" title="Industrial applications of nanotechnology">Nanoweapons</a></li> <li><a href="/wiki/Kinetic_bombardment" title="Kinetic bombardment">Kinetic bombardment</a> <ul><li><a href="/wiki/Kinetic_energy_weapon" title="Kinetic energy weapon">Kinetic energy weapon</a></li></ul></li> <li><a href="/wiki/Nuclear_warfare" title="Nuclear warfare">Nuclear warfare</a> <ul><li><a href="/wiki/Mutual_assured_destruction" title="Mutual assured destruction">Mutual assured destruction</a></li> <li><a href="/wiki/Dead_Hand" title="Dead Hand">Dead Hand</a></li> <li><a href="/wiki/Doomsday_Clock" title="Doomsday 
Clock">Doomsday Clock</a></li> <li><a href="/wiki/Doomsday_device" title="Doomsday device">Doomsday device</a></li> <li><a href="/wiki/Antimatter_weapon" title="Antimatter weapon">Antimatter weapon</a></li></ul></li> <li><a href="/wiki/Electromagnetic_pulse" title="Electromagnetic pulse">Electromagnetic pulse</a> (EMP)</li> <li><a href="/wiki/Safety_of_high-energy_particle_collision_experiments" title="Safety of high-energy particle collision experiments">Safety of high-energy particle collision experiments</a> <ul><li><a href="/wiki/Micro_black_hole" title="Micro black hole">Micro black hole</a></li> <li><a href="/wiki/Strangelet" title="Strangelet">Strangelet</a></li></ul></li> <li><a href="/wiki/Synthetic_intelligence" title="Synthetic intelligence">Synthetic intelligence</a> / <a href="/wiki/Artificial_intelligence" title="Artificial intelligence">Artificial intelligence</a> <ul><li><a href="/wiki/AI_takeover" title="AI takeover">AI takeover</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk from artificial intelligence</a></li> <li><a href="/wiki/Technological_singularity" title="Technological singularity">Technological singularity</a></li></ul></li> <li><a href="/wiki/Transhumanism" title="Transhumanism">Transhumanism</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Sociological</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Anthropogenic_hazard" class="mw-redirect" title="Anthropogenic hazard">Anthropogenic hazard</a></li> <li><a href="/wiki/Collapsology" title="Collapsology">Collapsology</a></li> <li><a href="/wiki/Doomsday_argument" title="Doomsday argument">Doomsday argument</a> <ul><li><a href="/wiki/Self-indication_assumption_doomsday_argument_rebuttal" title="Self-indication assumption doomsday argument rebuttal">Self-indication assumption doomsday argument rebuttal</a></li> <li><a href="/wiki/Self-referencing_doomsday_argument_rebuttal" title="Self-referencing doomsday argument rebuttal">Self-referencing doomsday argument rebuttal</a></li></ul></li> <li><a href="/wiki/Economic_collapse" title="Economic collapse">Economic collapse</a></li> <li><a href="/wiki/Malthusianism" title="Malthusianism">Malthusian catastrophe</a></li> <li><a href="/wiki/New_World_Order_(conspiracy_theory)" class="mw-redirect" title="New World Order (conspiracy theory)">New World Order (conspiracy theory)</a></li> <li><a href="/wiki/Nuclear_holocaust" title="Nuclear holocaust">Nuclear holocaust</a> <ul><li><a href="/wiki/Cobalt_bomb" title="Cobalt bomb">cobalt</a></li> <li><a href="/wiki/Nuclear_famine" title="Nuclear famine">famine</a></li> <li><a href="/wiki/Nuclear_winter" title="Nuclear winter">winter</a></li></ul></li> <li><a href="/wiki/Societal_collapse" title="Societal collapse">Societal collapse</a></li> <li><a href="/wiki/State_collapse" title="State collapse">State collapse</a></li> <li><a href="/wiki/World_War_III" title="World War III">World War III</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Climate_apocalypse" title="Climate apocalypse">Ecological</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"></div><table class="nowraplinks navbox-subgroup" style="border-spacing:0"><tbody><tr><th 
scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Climate_variability_and_change" title="Climate variability and change">Climate change</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Anoxic_event" title="Anoxic event">Anoxic event</a></li> <li><a href="/wiki/Biodiversity_loss" title="Biodiversity loss">Biodiversity loss</a> <ul><li><a href="/wiki/Mass_mortality_event" title="Mass mortality event">Mass mortality event</a></li></ul></li> <li><a href="/wiki/Cascade_effect_(ecology)" title="Cascade effect (ecology)">Cascade effect</a></li> <li><a href="/wiki/Cataclysmic_pole_shift_hypothesis" title="Cataclysmic pole shift hypothesis">Cataclysmic pole shift hypothesis</a></li> <li><a href="/wiki/Climate_change_and_civilizational_collapse" title="Climate change and civilizational collapse">Climate change and civilizational collapse</a></li> <li><a href="/wiki/Deforestation" title="Deforestation">Deforestation</a></li> <li><a href="/wiki/Desertification" title="Desertification">Desertification</a></li> <li><a href="/wiki/Extinction_risk_from_climate_change" title="Extinction risk from climate change">Extinction risk from climate change</a> <ul><li><a href="/wiki/Tipping_points_in_the_climate_system" title="Tipping points in the climate system">Tipping points in the climate system</a></li></ul></li> <li><a href="/wiki/Flood_basalt" title="Flood basalt">Flood basalt</a></li> <li><a href="/wiki/Global_dimming" title="Global dimming">Global dimming</a></li> <li><a href="/wiki/Global_terrestrial_stilling" title="Global terrestrial stilling">Global terrestrial stilling</a></li> <li><a href="/wiki/Climate_change" title="Climate change">Global warming</a></li> <li><a href="/wiki/Hypercane" title="Hypercane">Hypercane</a></li> <li><a href="/wiki/Ice_age" title="Ice age">Ice age</a></li> <li><a href="/wiki/Ecocide" title="Ecocide">Ecocide</a></li> <li><a href="/wiki/Ecological_collapse" class="mw-redirect" title="Ecological collapse">Ecological collapse</a></li> <li><a href="/wiki/Environmental_degradation" title="Environmental degradation">Environmental degradation</a></li> <li><a href="/wiki/Habitat_destruction" title="Habitat destruction">Habitat destruction</a></li> <li><a href="/wiki/Human_impact_on_the_environment" title="Human impact on the environment">Human impact on the environment</a> <ul><li><a href="/wiki/Environmental_issues_with_coral_reefs" title="Environmental issues with coral reefs">coral reefs</a></li> <li><a href="/wiki/Human_impact_on_marine_life" title="Human impact on marine life">on marine life</a></li></ul></li> <li><a href="/wiki/Land_degradation" title="Land degradation">Land degradation</a></li> <li><a href="/wiki/Land_consumption" title="Land consumption">Land consumption</a></li> <li><a href="/wiki/Land_surface_effects_on_climate" title="Land surface effects on climate">Land surface effects on climate</a></li> <li><a href="/wiki/Ocean_acidification" title="Ocean acidification">Ocean acidification</a></li> <li><a href="/wiki/Ozone_depletion" title="Ozone depletion">Ozone depletion</a></li> <li><a href="/wiki/Resource_depletion" title="Resource depletion">Resource depletion</a></li> <li><a href="/wiki/Sea_level_rise" title="Sea level rise">Sea level rise</a></li> <li><a href="/wiki/Supervolcano" title="Supervolcano">Supervolcano</a> <ul><li><a href="/wiki/Volcanic_winter" title="Volcanic winter">winter</a></li></ul></li> <li><a href="/wiki/Verneshot" 
title="Verneshot">Verneshot</a></li> <li><a href="/wiki/Water_pollution" title="Water pollution">Water pollution</a></li> <li><a href="/wiki/Water_scarcity" title="Water scarcity">Water scarcity</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Earth_Overshoot_Day" title="Earth Overshoot Day">Earth Overshoot Day</a></th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Overexploitation" title="Overexploitation">Overexploitation</a></li> <li><a href="/wiki/Overpopulation" title="Overpopulation">Overpopulation</a> <ul><li><a href="/wiki/Human_overpopulation" title="Human overpopulation">Human overpopulation</a></li></ul></li></ul> </div></td></tr></tbody></table><div></div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Biological</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"></div><table class="nowraplinks navbox-subgroup" style="border-spacing:0"><tbody><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Extinction" title="Extinction">Extinction</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Extinction_event" title="Extinction event">Extinction event</a></li> <li><a href="/wiki/Holocene_extinction" title="Holocene extinction">Holocene extinction</a></li> <li><a href="/wiki/Human_extinction" title="Human extinction">Human extinction</a></li> <li><a href="/wiki/List_of_extinction_events" title="List of extinction events">List of extinction events</a></li> <li><a href="/wiki/Genetic_erosion" title="Genetic erosion">Genetic erosion</a></li> <li><a href="/wiki/Genetic_pollution" title="Genetic pollution">Genetic pollution</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Others</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Biodiversity_loss" title="Biodiversity loss">Biodiversity loss</a> <ul><li><a href="/wiki/Decline_in_amphibian_populations" title="Decline in amphibian populations">Decline in amphibian populations</a></li> <li><a href="/wiki/Decline_in_insect_populations" title="Decline in insect populations">Decline in insect populations</a></li></ul></li> <li><a href="/wiki/Biotechnology_risk" title="Biotechnology risk">Biotechnology risk</a> <ul><li><a href="/wiki/Biological_agent" title="Biological agent">Biological agent</a></li> <li><a href="/wiki/Biological_warfare" title="Biological warfare">Biological warfare</a></li> <li><a href="/wiki/Bioterrorism" title="Bioterrorism">Bioterrorism</a></li></ul></li> <li><a href="/wiki/Colony_collapse_disorder" title="Colony collapse disorder">Colony collapse disorder</a></li> <li><a href="/wiki/Defaunation" title="Defaunation">Defaunation</a></li> <li><a href="/wiki/Dysgenics" title="Dysgenics">Dysgenics</a></li> <li><a href="/wiki/Interplanetary_contamination" title="Interplanetary contamination">Interplanetary contamination</a></li> <li><a href="/wiki/Pandemic" title="Pandemic">Pandemic</a></li> <li><a href="/wiki/Pollinator_decline" title="Pollinator decline">Pollinator decline</a></li> <li><a href="/wiki/Overfishing" title="Overfishing">Overfishing</a></li></ul> </div></td></tr></tbody></table><div></div></td></tr><tr><th scope="row" 
class="navbox-group" style="width:1%">Astronomical</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Big_Crunch" title="Big Crunch">Big Crunch</a></li> <li><a href="/wiki/Big_Rip" title="Big Rip">Big Rip</a></li> <li><a href="/wiki/Coronal_mass_ejection" title="Coronal mass ejection">Coronal mass ejection</a></li> <li><a href="/wiki/Cosmological_phase_transition" title="Cosmological phase transition">Cosmological phase transition</a></li> <li><a href="/wiki/Geomagnetic_storm" title="Geomagnetic storm">Geomagnetic storm</a></li> <li><a href="/wiki/False_vacuum_decay" class="mw-redirect" title="False vacuum decay">False vacuum decay</a></li> <li><a href="/wiki/Gamma-ray_burst" title="Gamma-ray burst">Gamma-ray burst</a></li> <li><a href="/wiki/Heat_death_of_the_universe" title="Heat death of the universe">Heat death of the universe</a></li> <li><a href="/wiki/Proton_decay" title="Proton decay">Proton decay</a></li> <li><a href="/wiki/Virtual_black_hole" title="Virtual black hole">Virtual black hole</a></li> <li><a href="/wiki/Impact_event" title="Impact event">Impact event</a> <ul><li><a href="/wiki/Asteroid_impact_avoidance" title="Asteroid impact avoidance">Asteroid impact avoidance</a></li> <li><a href="/wiki/Asteroid_impact_prediction" title="Asteroid impact prediction">Asteroid impact prediction</a></li> <li><a href="/wiki/Potentially_hazardous_object" title="Potentially hazardous object">Potentially hazardous object</a> <ul><li><a href="/wiki/Near-Earth_object" title="Near-Earth object">Near-Earth object</a></li></ul></li> <li><a href="/wiki/Impact_winter" title="Impact winter">winter</a></li> <li><a href="/wiki/Rogue_planet" title="Rogue planet">Rogue planet</a></li></ul></li> <li><a href="/wiki/Near-Earth_supernova" title="Near-Earth supernova">Near-Earth supernova</a></li> <li><a href="/wiki/Hypernova" title="Hypernova">Hypernova</a></li> <li><a href="/wiki/Micronova" title="Micronova">Micronova</a></li> <li><a href="/wiki/Solar_flare" title="Solar flare">Solar flare</a></li> <li><a href="/wiki/Stellar_collision" title="Stellar collision">Stellar collision</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Eschatology" title="Eschatology">Eschatological</a></th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Buddhist_eschatology" title="Buddhist eschatology">Buddhist</a> <ul><li><a href="/wiki/Maitreya" title="Maitreya">Maitreya</a></li> <li><a href="/wiki/Three_Ages_of_Buddhism" title="Three Ages of Buddhism">Three Ages</a></li></ul></li> <li><a href="/wiki/Hindu_eschatology" title="Hindu eschatology">Hindu</a> <ul><li><a href="/wiki/Kalki" title="Kalki">Kalki</a></li> <li><a href="/wiki/Kali_Yuga" title="Kali Yuga">Kali Yuga</a></li></ul></li> <li><a href="/wiki/Last_Judgment" title="Last Judgment">Last Judgement</a></li> <li><a href="/wiki/Second_Coming" title="Second Coming">Second Coming</a> <ul><li><a href="/wiki/Book_of_Enoch" title="Book of Enoch">1 Enoch</a></li> <li><a href="/wiki/Book_of_Daniel" title="Book of Daniel">Daniel</a> <ul><li><a href="/wiki/Abomination_of_desolation" title="Abomination of desolation">Abomination of desolation</a></li> <li><a href="/wiki/Prophecy_of_Seventy_Weeks" title="Prophecy of Seventy Weeks">Prophecy of Seventy Weeks</a></li></ul></li> <li><a href="/wiki/Messiah" title="Messiah">Messiah</a></li> <li><a 
href="/wiki/Christian_eschatology" title="Christian eschatology">Christian</a> <ul><li><a href="/wiki/Futurism_(Christianity)" title="Futurism (Christianity)">Futurism</a></li> <li><a href="/wiki/Historicism_(Christianity)" title="Historicism (Christianity)">Historicism</a> <ul><li><a href="/wiki/Historicist_interpretations_of_the_Book_of_Revelation" title="Historicist interpretations of the Book of Revelation">Interpretations of Revelation</a></li></ul></li> <li><a href="/wiki/Idealism_(Christian_eschatology)" title="Idealism (Christian eschatology)"> Idealism</a></li> <li><a href="/wiki/Preterism" title="Preterism">Preterism</a></li> <li><a href="/wiki/2_Esdras" title="2 Esdras">2 Esdras</a></li> <li><a href="/wiki/Second_Epistle_to_the_Thessalonians" title="Second Epistle to the Thessalonians">2 Thessalonians</a> <ul><li><a href="/wiki/Man_of_sin" title="Man of sin">Man of sin</a></li> <li><a href="/wiki/Katechon" title="Katechon">Katechon</a></li></ul></li> <li><a href="/wiki/Antichrist" title="Antichrist">Antichrist</a></li> <li><a href="/wiki/Book_of_Revelation" title="Book of Revelation">Book of Revelation</a> <ul><li><a href="/wiki/Events_of_Revelation" title="Events of Revelation">Events</a> <ul><li><a href="/wiki/Four_Horsemen_of_the_Apocalypse" title="Four Horsemen of the Apocalypse">Four Horsemen of the Apocalypse</a></li> <li><a href="/wiki/Lake_of_fire" title="Lake of fire">Lake of fire</a></li> <li><a href="/wiki/Number_of_the_beast" title="Number of the beast">Number of the Beast</a></li> <li><a href="/wiki/Seven_bowls" title="Seven bowls">Seven bowls</a></li> <li><a href="/wiki/Seven_seals" title="Seven seals">Seven seals</a></li> <li><a href="/wiki/The_Beast_(Revelation)" title="The Beast (Revelation)">The Beast</a></li> <li><a href="/wiki/Two_witnesses" title="Two witnesses">Two witnesses</a></li> <li><a href="/wiki/War_in_Heaven" title="War in Heaven">War in Heaven</a></li> <li><a href="/wiki/Whore_of_Babylon" title="Whore of Babylon">Whore of Babylon</a></li></ul></li></ul></li> <li><a href="/wiki/Great_Apostasy" title="Great Apostasy">Great Apostasy</a></li> <li><a href="/wiki/New_Earth_(Christianity)" title="New Earth (Christianity)">New Earth</a></li> <li><a href="/wiki/New_Jerusalem" title="New Jerusalem">New Jerusalem</a></li> <li><a href="/wiki/Olivet_Discourse" title="Olivet Discourse">Olivet Discourse</a> <ul><li><a href="/wiki/Great_Tribulation" title="Great Tribulation">Great Tribulation</a></li></ul></li> <li><a href="/wiki/Son_of_perdition" title="Son of perdition">Son of perdition</a></li> <li><a href="/wiki/The_Sheep_and_the_Goats" title="The Sheep and the Goats">Sheep and Goats</a></li></ul></li> <li><a href="/wiki/Islamic_eschatology" title="Islamic eschatology">Islamic</a> <ul><li><a href="/wiki/Qa%27im_Al_Muhammad" title="Qa&#39;im Al Muhammad">Al-Qa'im</a></li> <li><a href="/wiki/Beast_of_the_Earth" title="Beast of the Earth">Beast of the Earth</a></li> <li><a href="/wiki/Dhu_al-Qarnayn" title="Dhu al-Qarnayn">Dhu al-Qarnayn</a></li> <li><a href="/wiki/Dhul-Suwayqatayn" title="Dhul-Suwayqatayn">Dhul-Suwayqatayn</a></li> <li><a href="/wiki/Al-Masih_ad-Dajjal" title="Al-Masih ad-Dajjal">Dajjal</a></li> <li><a href="/wiki/Israfil" title="Israfil">Israfil</a></li> <li><a href="/wiki/Mahdi" title="Mahdi">Mahdi</a></li> <li><a href="/wiki/Sufyani" title="Sufyani">Sufyani</a></li></ul></li> <li><a href="/wiki/Jewish_eschatology" title="Jewish eschatology">Jewish</a> <ul><li><a href="/wiki/Messiah_in_Judaism" title="Messiah in Judaism">Messiah</a></li> 
<li><a href="/wiki/Gog_and_Magog" title="Gog and Magog">War of Gog and Magog</a></li> <li><a href="/wiki/Third_Temple" title="Third Temple">Third Temple</a></li></ul></li></ul></li> <li><a href="/wiki/Ragnar%C3%B6k" title="Ragnarök">Norse</a></li> <li><a href="/wiki/Frashokereti" title="Frashokereti">Zoroastrian</a> <ul><li><a href="/wiki/Saoshyant" title="Saoshyant">Saoshyant</a></li></ul></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Others</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/2011_end_times_prediction" title="2011 end times prediction">2011 end times prediction</a></li> <li><a href="/wiki/2012_phenomenon" title="2012 phenomenon">2012 phenomenon</a></li> <li><a href="/wiki/Apocalypse" title="Apocalypse">Apocalypse</a></li> <li><a href="/wiki/Apocalyptic_literature" title="Apocalyptic literature">Apocalyptic literature</a></li> <li><a href="/wiki/Apocalypticism" title="Apocalypticism">Apocalypticism</a></li> <li><a href="/wiki/Armageddon" title="Armageddon">Armageddon</a></li> <li><a href="/wiki/Blood_moon_prophecy" title="Blood moon prophecy">Blood moon prophecy</a></li> <li><a href="/wiki/Earth_Changes" title="Earth Changes">Earth Changes</a></li> <li><a href="/wiki/Global_catastrophic_risk" title="Global catastrophic risk">End time</a></li> <li><a href="/wiki/Gog_and_Magog" title="Gog and Magog">Gog and Magog</a></li> <li><a href="/wiki/List_of_dates_predicted_for_apocalyptic_events" title="List of dates predicted for apocalyptic events">List of dates predicted for apocalyptic events</a></li> <li><a href="/wiki/Messianism" title="Messianism">Messianism</a> <ul><li><a href="/wiki/Messianic_Age" title="Messianic Age">Messianic Age</a></li></ul></li> <li><a href="/wiki/Millenarianism" title="Millenarianism">Millenarianism</a></li> <li><a href="/wiki/Millennialism" title="Millennialism">Millennialism</a> <ul><li><a href="/wiki/Premillennialism" title="Premillennialism">Premillennialism</a></li> <li><a href="/wiki/Amillennialism" title="Amillennialism">Amillennialism</a></li> <li><a href="/wiki/Postmillennialism" title="Postmillennialism">Postmillennialism</a></li></ul></li> <li><a href="/wiki/Nemesis_(hypothetical_star)" title="Nemesis (hypothetical star)">Nemesis (hypothetical star)</a></li> <li><a href="/wiki/Nibiru_cataclysm" title="Nibiru cataclysm">Nibiru cataclysm</a></li> <li><a href="/wiki/Rapture" title="Rapture">Rapture</a> <ul><li><a href="/wiki/Rapture#Prewrath_premillennialism" title="Rapture">Prewrath</a></li> <li><a href="/wiki/Post-tribulation_rapture" title="Post-tribulation rapture">Post-tribulation rapture</a></li></ul></li> <li><a href="/wiki/Universal_resurrection" title="Universal resurrection">Resurrection of the dead</a></li> <li><a href="/wiki/World_to_come" title="World to come">World to come</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Fictional</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Alien_invasion" title="Alien invasion">Alien invasion</a></li> <li><a href="/wiki/Apocalyptic_and_post-apocalyptic_fiction" title="Apocalyptic and post-apocalyptic fiction">Apocalyptic and post-apocalyptic fiction</a> <ul><li><a href="/wiki/List_of_apocalyptic_and_post-apocalyptic_fiction" title="List of apocalyptic and post-apocalyptic fiction">List of apocalyptic and post-apocalyptic 
fiction</a></li> <li><a href="/wiki/List_of_apocalyptic_films" title="List of apocalyptic films">List of apocalyptic films</a></li></ul></li> <li><a href="/wiki/Climate_fiction" title="Climate fiction">Climate fiction</a></li> <li><a href="/wiki/Disaster_film" title="Disaster film">Disaster films</a> <ul><li><a href="/wiki/List_of_disaster_films" title="List of disaster films">List of disaster films</a></li></ul></li> <li><a href="/wiki/List_of_fictional_doomsday_devices" title="List of fictional doomsday devices">List of fictional doomsday devices</a></li> <li><a href="/wiki/Zombie_apocalypse" title="Zombie apocalypse">Zombie apocalypse</a> <ul><li><a href="/wiki/Zombie" title="Zombie">Zombie</a></li></ul></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Organizations</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Centre_for_the_Study_of_Existential_Risk" title="Centre for the Study of Existential Risk">Centre for the Study of Existential Risk</a></li> <li><a href="/wiki/Future_of_Humanity_Institute" title="Future of Humanity Institute">Future of Humanity Institute</a></li> <li><a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a></li> <li><a href="/wiki/Nuclear_Threat_Initiative" title="Nuclear Threat Initiative">Nuclear Threat Initiative</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">General</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Ransomware" title="Ransomware">Ransomware</a></li> <li><a href="/wiki/Cyberwarfare" title="Cyberwarfare">Cyberwarfare</a></li> <li><a href="/wiki/Disaster" title="Disaster">Disaster</a></li> <li><a href="/wiki/Economic_depression" title="Economic depression">Depression</a></li> <li><a href="/wiki/Financial_crisis" title="Financial crisis">Financial crisis</a></li> <li><a href="/wiki/Pandemic" title="Pandemic">Pandemic</a></li> <li><a href="/wiki/Riot" title="Riot">Riots</a></li> <li><a href="/wiki/Social_crisis" title="Social crisis">Social crisis</a></li> <li><a href="/wiki/Survivalism" title="Survivalism">Survivalism</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><b><span class="nowrap"><span class="noviewer" typeof="mw:File"><a href="/wiki/File:The_Earth_seen_from_Apollo_17.jpg" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/97/The_Earth_seen_from_Apollo_17.jpg/16px-The_Earth_seen_from_Apollo_17.jpg" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/97/The_Earth_seen_from_Apollo_17.jpg/24px-The_Earth_seen_from_Apollo_17.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/97/The_Earth_seen_from_Apollo_17.jpg/32px-The_Earth_seen_from_Apollo_17.jpg 2x" data-file-width="3000" data-file-height="3002" /></a></span> </span><a href="/wiki/Portal:World" title="Portal:World">World&#32;portal</a></b></li> <li><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, 
//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> Categories <ul><li><a href="/wiki/Category:Apocalypticism" title="Category:Apocalypticism">Apocalypticism</a></li> <li><a href="/wiki/Category:Future_problems" title="Category:Future problems">Future problems</a></li> <li><a href="/wiki/Category:Hazards" title="Category:Hazards">Hazards</a></li> <li><a href="/wiki/Category:Risk_analysis" title="Category:Risk analysis">Risk analysis</a></li> <li><a href="/wiki/Category:Doomsday_scenarios" title="Category:Doomsday scenarios">Doomsday scenarios</a></li></ul></li></ul> </div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐web.codfw.main‐6b7f745dd4‐sh94x Cached time: 20241125133748 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 1.826 seconds Real time usage: 1.975 seconds Preprocessor visited node count: 10805/1000000 Post‐expand include size: 456546/2097152 bytes Template argument size: 11328/2097152 bytes Highest expansion depth: 12/100 Expensive parser function count: 16/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 695656/5000000 bytes Lua time usage: 1.178/10.000 seconds Lua memory usage: 7803776/52428800 bytes Lua Profile: MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::callParserFunction 260 ms 21.0% ? 260 ms 21.0% dataWrapper <mw.lua:672> 140 ms 11.3% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::find 100 ms 8.1% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::gsub 80 ms 6.5% makeMessage <mw.message.lua:76> 40 ms 3.2% format 40 ms 3.2% date 40 ms 3.2% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::match 40 ms 3.2% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::formatDate 40 ms 3.2% [others] 200 ms 16.1% Number of Wikibase entities loaded: 0/400 --> <!-- Transclusion expansion time report (%,ms,calls,template) 100.00% 1688.302 1 -total 66.39% 1120.931 2 Template:Reflist 14.98% 252.974 47 Template:Cite_news 14.30% 241.346 57 Template:Cite_web 12.29% 207.449 34 Template:Cite_journal 8.96% 151.278 15 Template:Cite_book 6.07% 102.532 1 Template:Sfn 5.20% 87.850 1 Template:Artificial_intelligence 5.07% 85.651 1 Template:Short_description 5.06% 85.502 1 Template:Sidebar_with_collapsible_lists --> <!-- Saved in parser cache with key enwiki:pcache:idhash:46583121-0!canonical and timestamp 20241125133748 and revision id 1257846244. 
Rendering was triggered because: page-view --> </div><!--esi <esi:include src="/esitest-fa8a495983347898/content" /> --><noscript><img src="https://login.wikimedia.org/wiki/Special:CentralAutoLogin/start?type=1x1" alt="" width="1" height="1" style="border: none; position: absolute;"></noscript> <div class="printfooter" data-nosnippet="">Retrieved from "<a dir="ltr" href="https://en.wikipedia.org/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;oldid=1257846244">https://en.wikipedia.org/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;oldid=1257846244</a>"</div></div> <div id="catlinks" class="catlinks" data-mw="interface"><div id="mw-normal-catlinks" class="mw-normal-catlinks"><a href="/wiki/Help:Category" title="Help:Category">Categories</a>: <ul><li><a href="/wiki/Category:Existential_risk_from_artificial_general_intelligence" title="Category:Existential risk from artificial general intelligence">Existential risk from artificial general intelligence</a></li><li><a href="/wiki/Category:Future_problems" title="Category:Future problems">Future problems</a></li><li><a href="/wiki/Category:Human_extinction" title="Category:Human extinction">Human extinction</a></li><li><a href="/wiki/Category:AI_safety" title="Category:AI safety">AI safety</a></li><li><a href="/wiki/Category:Technology_hazards" title="Category:Technology hazards">Technology hazards</a></li><li><a href="/wiki/Category:Doomsday_scenarios" title="Category:Doomsday scenarios">Doomsday scenarios</a></li></ul></div><div id="mw-hidden-catlinks" class="mw-hidden-catlinks mw-hidden-cats-hidden">Hidden categories: <ul><li><a href="/wiki/Category:Webarchive_template_wayback_links" title="Category:Webarchive template wayback links">Webarchive template wayback links</a></li><li><a href="/wiki/Category:Articles_with_short_description" title="Category:Articles with short description">Articles with short description</a></li><li><a href="/wiki/Category:Short_description_is_different_from_Wikidata" title="Category:Short description is different from Wikidata">Short description is different from Wikidata</a></li><li><a href="/wiki/Category:Use_dmy_dates_from_May_2018" title="Category:Use dmy dates from May 2018">Use dmy dates from May 2018</a></li></ul></div></div> </div> </main> </div> <div class="mw-footer-container"> <footer id="footer" class="mw-footer" > <ul id="footer-info"> <li id="footer-info-lastmod"> This page was last edited on 16 November 2024, at 22:49<span class="anonymous-show">&#160;(UTC)</span>.</li> <li id="footer-info-copyright">Text is available under the <a href="/wiki/Wikipedia:Text_of_the_Creative_Commons_Attribution-ShareAlike_4.0_International_License" title="Wikipedia:Text of the Creative Commons Attribution-ShareAlike 4.0 International License">Creative Commons Attribution-ShareAlike 4.0 License</a>; additional terms may apply. By using this site, you agree to the <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Terms_of_Use" class="extiw" title="foundation:Special:MyLanguage/Policy:Terms of Use">Terms of Use</a> and <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy" class="extiw" title="foundation:Special:MyLanguage/Policy:Privacy policy">Privacy Policy</a>. 
Wikipedia® is a registered trademark of the <a rel="nofollow" class="external text" href="https://wikimediafoundation.org/">Wikimedia Foundation, Inc.</a>, a non-profit organization.</li> </ul> <ul id="footer-places"> <li id="footer-places-privacy"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy">Privacy policy</a></li> <li id="footer-places-about"><a href="/wiki/Wikipedia:About">About Wikipedia</a></li> <li id="footer-places-disclaimers"><a href="/wiki/Wikipedia:General_disclaimer">Disclaimers</a></li> <li id="footer-places-contact"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us">Contact Wikipedia</a></li> <li id="footer-places-wm-codeofconduct"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Universal_Code_of_Conduct">Code of Conduct</a></li> <li id="footer-places-developers"><a href="https://developer.wikimedia.org">Developers</a></li> <li id="footer-places-statslink"><a href="https://stats.wikimedia.org/#/en.wikipedia.org">Statistics</a></li> <li id="footer-places-cookiestatement"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Cookie_statement">Cookie statement</a></li> <li id="footer-places-mobileview"><a href="//en.m.wikipedia.org/w/index.php?title=Existential_risk_from_artificial_intelligence&amp;mobileaction=toggle_view_mobile" class="noprint stopMobileRedirectToggle">Mobile view</a></li> </ul> <ul id="footer-icons" class="noprint"> <li id="footer-copyrightico"><a href="https://wikimediafoundation.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/static/images/footer/wikimedia-button.svg" width="84" height="29" alt="Wikimedia Foundation" loading="lazy"></a></li> <li id="footer-poweredbyico"><a href="https://www.mediawiki.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/w/resources/assets/poweredby_mediawiki.svg" alt="Powered by MediaWiki" width="88" height="31" loading="lazy"></a></li> </ul> </footer> </div> </div> </div> <div class="vector-settings" id="p-dock-bottom"> <ul></ul> </div><script>(RLQ=window.RLQ||[]).push(function(){mw.config.set({"wgHostname":"mw-web.codfw.main-697b7966c5-xq9g4","wgBackendResponseTime":195,"wgPageParseReport":{"limitreport":{"cputime":"1.826","walltime":"1.975","ppvisitednodes":{"value":10805,"limit":1000000},"postexpandincludesize":{"value":456546,"limit":2097152},"templateargumentsize":{"value":11328,"limit":2097152},"expansiondepth":{"value":12,"limit":100},"expensivefunctioncount":{"value":16,"limit":500},"unstrip-depth":{"value":1,"limit":20},"unstrip-size":{"value":695656,"limit":5000000},"entityaccesscount":{"value":0,"limit":400},"timingprofile":["100.00% 1688.302 1 -total"," 66.39% 1120.931 2 Template:Reflist"," 14.98% 252.974 47 Template:Cite_news"," 14.30% 241.346 57 Template:Cite_web"," 12.29% 207.449 34 Template:Cite_journal"," 8.96% 151.278 15 Template:Cite_book"," 6.07% 102.532 1 Template:Sfn"," 5.20% 87.850 1 Template:Artificial_intelligence"," 5.07% 85.651 1 Template:Short_description"," 5.06% 85.502 1 Template:Sidebar_with_collapsible_lists"]},"scribunto":{"limitreport-timeusage":{"value":"1.178","limit":"10.000"},"limitreport-memusage":{"value":7803776,"limit":52428800},"limitreport-logs":"anchor_id_list = table#1 {\n [\"CITEREFAllenby2016\"] = 1,\n [\"CITEREFAnderson2014\"] = 1,\n [\"CITEREFArmstrong2013\"] = 1,\n [\"CITEREFBabauta\"] = 1,\n [\"CITEREFBalesD\u0026#039;AlessandroKirk-Giannini2024\"] = 
1,\n [\"CITEREFBarrat2013\"] = 1,\n [\"CITEREFBarrettBaum2016\"] = 2,\n [\"CITEREFBaum2018\"] = 2,\n [\"CITEREFBostrom\"] = 1,\n [\"CITEREFBostrom2002\"] = 1,\n [\"CITEREFBostrom2012\"] = 2,\n [\"CITEREFBostrom2013\"] = 1,\n [\"CITEREFBostrom2014\"] = 2,\n [\"CITEREFBostrom2015\"] = 1,\n [\"CITEREFBostrom2016\"] = 1,\n [\"CITEREFBridge2017\"] = 1,\n [\"CITEREFBrimelow\"] = 1,\n [\"CITEREFCarayannisDraper2022\"] = 1,\n [\"CITEREFCarayannisDraper2023\"] = 1,\n [\"CITEREFChorost2016\"] = 1,\n [\"CITEREFChristian2020\"] = 1,\n [\"CITEREFClark2015a\"] = 1,\n [\"CITEREFClinton2017\"] = 1,\n [\"CITEREFColdewey2023\"] = 1,\n [\"CITEREFCotton-BarrattOrd2014\"] = 1,\n [\"CITEREFCoughlan2013\"] = 1,\n [\"CITEREFDadich\"] = 1,\n [\"CITEREFDawes2021\"] = 1,\n [\"CITEREFDe_Vynck2023\"] = 1,\n [\"CITEREFDietterichHorvitz2015\"] = 1,\n [\"CITEREFDignum2021\"] = 1,\n [\"CITEREFDoherty2018\"] = 1,\n [\"CITEREFDomonoske2017\"] = 1,\n [\"CITEREFDowd2017\"] = 1,\n [\"CITEREFEadicicco2015\"] = 1,\n [\"CITEREFFassihi2023\"] = 1,\n [\"CITEREFFisher\"] = 1,\n [\"CITEREFFord2015\"] = 1,\n [\"CITEREFFung2023\"] = 1,\n [\"CITEREFGarling2015\"] = 1,\n [\"CITEREFGebruTorres2024\"] = 1,\n [\"CITEREFGeist2016\"] = 1,\n [\"CITEREFGibbs2017\"] = 1,\n [\"CITEREFGraves2017\"] = 1,\n [\"CITEREFHaney2018\"] = 1,\n [\"CITEREFHendrycksMazeikaWoodside,_Thomas2023\"] = 1,\n [\"CITEREFHilliard2017\"] = 1,\n [\"CITEREFHsu2012\"] = 1,\n [\"CITEREFJackson\"] = 1,\n [\"CITEREFJindal2023\"] = 1,\n [\"CITEREFJohnson2015\"] = 1,\n [\"CITEREFJohnson2019\"] = 1,\n [\"CITEREFKasirzadeh2024\"] = 1,\n [\"CITEREFKateman2023\"] = 1,\n [\"CITEREFKelly2017\"] = 1,\n [\"CITEREFKharpal2017\"] = 1,\n [\"CITEREFKhatchadourian2015\"] = 1,\n [\"CITEREFKircher\"] = 1,\n [\"CITEREFKumar\"] = 1,\n [\"CITEREFMaas2019\"] = 1,\n [\"CITEREFMacAskill2022\"] = 1,\n [\"CITEREFMcGinnis2010\"] = 1,\n [\"CITEREFMcMillan2024\"] = 1,\n [\"CITEREFMecklin2023\"] = 1,\n [\"CITEREFMetz2018\"] = 1,\n [\"CITEREFMetz2023\"] = 1,\n [\"CITEREFMore2023\"] = 1,\n [\"CITEREFNgoChanSören_Mindermann2023\"] = 1,\n [\"CITEREFOrd2020\"] = 2,\n [\"CITEREFParkin2015\"] = 1,\n [\"CITEREFPiesing2012\"] = 1,\n [\"CITEREFPiper2023\"] = 1,\n [\"CITEREFPistono,_FedericoYampolskiy,_Roman_V.2016\"] = 1,\n [\"CITEREFRamamoorthyYampolskiy2018\"] = 1,\n [\"CITEREFRawlinson2015\"] = 1,\n [\"CITEREFRoose2023\"] = 1,\n [\"CITEREFRussell2014\"] = 1,\n [\"CITEREFRussell2017\"] = 1,\n [\"CITEREFRussellDeweyTegmark2015\"] = 1,\n [\"CITEREFRussellNorvig2003\"] = 1,\n [\"CITEREFRussellNorvig2009\"] = 1,\n [\"CITEREFSamuelsson2019\"] = 1,\n [\"CITEREFShermer2017\"] = 1,\n [\"CITEREFSotalaYampolskiy2014\"] = 5,\n [\"CITEREFSunak2023\"] = 1,\n [\"CITEREFTaylorHern2023\"] = 1,\n [\"CITEREFTegmark2017\"] = 1,\n [\"CITEREFTegmark2023\"] = 1,\n [\"CITEREFThe_White_House2023\"] = 1,\n [\"CITEREFTilli2016\"] = 2,\n [\"CITEREFTkachenko2024\"] = 1,\n [\"CITEREFToby_ShevlaneSebastian_FarquharBen_GarfinkelMary_Phuong2023\"] = 1,\n [\"CITEREFTorres2018\"] = 1,\n [\"CITEREFTurchinDenkenberger2018\"] = 1,\n [\"CITEREFTuring1951\"] = 2,\n [\"CITEREFTuring1996\"] = 1,\n [\"CITEREFTysonKikuchi2023\"] = 1,\n [\"CITEREFUrbinaLentzosInvernizziEkins2022\"] = 1,\n [\"CITEREFVincent2016\"] = 1,\n [\"CITEREFVincent2023\"] = 1,\n [\"CITEREFVynck2023\"] = 1,\n [\"CITEREFWakefield2015\"] = 1,\n [\"CITEREFWalter2023\"] = 1,\n [\"CITEREFWashington_Post2015\"] = 1,\n [\"CITEREFYampolskiy2014\"] = 1,\n [\"CITEREFYampolskiy2022\"] = 1,\n [\"CITEREFYudkowsky2008\"] = 1,\n [\"CITEREFYudkowsky2011\"] = 1,\n [\"CITEREFYudkowsky2023\"] = 
1,\n}\ntemplate_list = table#1 {\n [\"Artificial intelligence\"] = 1,\n [\"Blockquote\"] = 2,\n [\"Cbignore\"] = 1,\n [\"Citation\"] = 3,\n [\"Cite arXiv\"] = 4,\n [\"Cite book\"] = 15,\n [\"Cite episode\"] = 1,\n [\"Cite journal\"] = 34,\n [\"Cite magazine\"] = 13,\n [\"Cite news\"] = 47,\n [\"Cite speech\"] = 1,\n [\"Cite web\"] = 57,\n [\"Clear\"] = 1,\n [\"Cquote\"] = 5,\n [\"Div col\"] = 1,\n [\"Div col end\"] = 1,\n [\"Doomsday\"] = 1,\n [\"Effective altruism\"] = 1,\n [\"Efn\"] = 3,\n [\"Existential risk from artificial intelligence\"] = 1,\n [\"Further\"] = 5,\n [\"Main\"] = 2,\n [\"Nbsp\"] = 1,\n [\"Notelist\"] = 1,\n [\"Reflist\"] = 1,\n [\"See also\"] = 3,\n [\"Sfn\"] = 1,\n [\"Short description\"] = 1,\n [\"Use dmy dates\"] = 1,\n [\"Webarchive\"] = 4,\n}\narticle_whitelist = table#1 {\n}\ntable#1 {\n [\"size\"] = \"tiny\",\n}\n","limitreport-profile":[["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::callParserFunction","260","21.0"],["?","260","21.0"],["dataWrapper \u003Cmw.lua:672\u003E","140","11.3"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::find","100","8.1"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::gsub","80","6.5"],["makeMessage \u003Cmw.message.lua:76\u003E","40","3.2"],["format","40","3.2"],["date","40","3.2"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::match","40","3.2"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::formatDate","40","3.2"],["[others]","200","16.1"]]},"cachereport":{"origin":"mw-web.codfw.main-6b7f745dd4-sh94x","timestamp":"20241125133748","ttl":2592000,"transientcontent":false}}});});</script> <script type="application/ld+json">{"@context":"https:\/\/schema.org","@type":"Article","name":"Existential risk from artificial intelligence","url":"https:\/\/en.wikipedia.org\/wiki\/Existential_risk_from_artificial_intelligence","sameAs":"http:\/\/www.wikidata.org\/entity\/Q21715237","mainEntity":"http:\/\/www.wikidata.org\/entity\/Q21715237","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\/\/www.wikimedia.org\/static\/images\/wmf-hor-googpub.png"}},"datePublished":"2015-05-01T21:17:43Z","dateModified":"2024-11-16T22:49:58Z","headline":"hypothesis that artificial general intelligence could result in human extinction"}</script> </body> </html>
