Ethics of artificial intelligence

From Wikipedia, the free encyclopedia

Challenges related to the responsible development and use of AI
[o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-Machine_ethics" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Machine_ethics"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>Machine ethics</span> </div> </a> <button aria-controls="toc-Machine_ethics-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Machine ethics subsection</span> </button> <ul id="toc-Machine_ethics-sublist" class="vector-toc-list"> <li id="toc-Robot_ethics" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Robot_ethics"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.1</span> <span>Robot ethics</span> </div> </a> <ul id="toc-Robot_ethics-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Ethical_principles" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Ethical_principles"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.2</span> <span>Ethical principles</span> </div> </a> <ul 
id="toc-Ethical_principles-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Current_challenges" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Current_challenges"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Current challenges</span> </div> </a> <button aria-controls="toc-Current_challenges-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Current challenges subsection</span> </button> <ul id="toc-Current_challenges-sublist" class="vector-toc-list"> <li id="toc-Algorithmic_biases" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Algorithmic_biases"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1</span> <span>Algorithmic biases</span> </div> </a> <ul id="toc-Algorithmic_biases-sublist" class="vector-toc-list"> <li id="toc-Language_bias" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Language_bias"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1.1</span> <span>Language bias</span> </div> </a> <ul id="toc-Language_bias-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Gender_bias" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Gender_bias"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1.2</span> <span>Gender bias</span> </div> </a> <ul id="toc-Gender_bias-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Political_bias" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Political_bias"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1.3</span> <span>Political bias</span> </div> </a> <ul id="toc-Political_bias-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Stereotyping" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Stereotyping"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1.4</span> <span>Stereotyping</span> </div> </a> <ul id="toc-Stereotyping-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Dominance_by_tech_giants" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Dominance_by_tech_giants"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.2</span> <span>Dominance by tech giants</span> </div> </a> <ul id="toc-Dominance_by_tech_giants-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Open-source" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Open-source"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.3</span> <span>Open-source</span> </div> </a> <ul id="toc-Open-source-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Transparency" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Transparency"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.4</span> <span>Transparency</span> </div> </a> <ul id="toc-Transparency-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Accountability" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Accountability"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.5</span> <span>Accountability</span> </div> </a> <ul id="toc-Accountability-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Regulation" class="vector-toc-list-item 
vector-toc-level-2"> <a class="vector-toc-link" href="#Regulation"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.6</span> <span>Regulation</span> </div> </a> <ul id="toc-Regulation-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Emergent_or_potential_future_challenges" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Emergent_or_potential_future_challenges"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>Emergent or potential future challenges</span> </div> </a> <button aria-controls="toc-Emergent_or_potential_future_challenges-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Emergent or potential future challenges subsection</span> </button> <ul id="toc-Emergent_or_potential_future_challenges-sublist" class="vector-toc-list"> <li id="toc-Increasing_use" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Increasing_use"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.1</span> <span>Increasing use</span> </div> </a> <ul id="toc-Increasing_use-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Robot_rights" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Robot_rights"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.2</span> <span>Robot rights</span> </div> </a> <ul id="toc-Robot_rights-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-AI_welfare" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#AI_welfare"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.3</span> <span>AI welfare</span> </div> </a> <ul id="toc-AI_welfare-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Threat_to_human_dignity" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Threat_to_human_dignity"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.4</span> <span>Threat to human dignity</span> </div> </a> <ul id="toc-Threat_to_human_dignity-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Liability_for_self-driving_cars" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Liability_for_self-driving_cars"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.5</span> <span>Liability for self-driving cars</span> </div> </a> <ul id="toc-Liability_for_self-driving_cars-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Weaponization" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Weaponization"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.6</span> <span>Weaponization</span> </div> </a> <ul id="toc-Weaponization-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Singularity" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Singularity"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.7</span> <span>Singularity</span> </div> </a> <ul id="toc-Singularity-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Solutions_and_approaches" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Solutions_and_approaches"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.8</span> <span>Solutions and approaches</span> </div> </a> <ul id="toc-Solutions_and_approaches-sublist" class="vector-toc-list"> </ul> 
</li> </ul> </li> <li id="toc-Institutions_in_AI_policy_&amp;_ethics" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Institutions_in_AI_policy_&amp;_ethics"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>Institutions in AI policy &amp; ethics</span> </div> </a> <button aria-controls="toc-Institutions_in_AI_policy_&amp;_ethics-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Institutions in AI policy &amp; ethics subsection</span> </button> <ul id="toc-Institutions_in_AI_policy_&amp;_ethics-sublist" class="vector-toc-list"> <li id="toc-Intergovernmental_initiatives" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Intergovernmental_initiatives"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.1</span> <span>Intergovernmental initiatives</span> </div> </a> <ul id="toc-Intergovernmental_initiatives-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Governmental_initiatives" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Governmental_initiatives"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2</span> <span>Governmental initiatives</span> </div> </a> <ul id="toc-Governmental_initiatives-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Academic_initiatives" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Academic_initiatives"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.3</span> <span>Academic initiatives</span> </div> </a> <ul id="toc-Academic_initiatives-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Private_organizations" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Private_organizations"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.4</span> <span>Private organizations</span> </div> </a> <ul id="toc-Private_organizations-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-History" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#History"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>History</span> </div> </a> <ul id="toc-History-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Role_and_impact_of_fiction" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Role_and_impact_of_fiction"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>Role and impact of fiction</span> </div> </a> <button aria-controls="toc-Role_and_impact_of_fiction-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Role and impact of fiction subsection</span> </button> <ul id="toc-Role_and_impact_of_fiction-sublist" class="vector-toc-list"> <li id="toc-TV_series" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#TV_series"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.1</span> <span>TV series</span> </div> </a> <ul id="toc-TV_series-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Future_visions_in_fiction_and_games" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Future_visions_in_fiction_and_games"> <div class="vector-toc-text"> <span 
class="vector-toc-numb">6.2</span> <span>Future visions in fiction and games</span> </div> </a> <ul id="toc-Future_visions_in_fiction_and_games-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">7</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">8</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-External_links" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#External_links"> <div class="vector-toc-text"> <span class="vector-toc-numb">9</span> <span>External links</span> </div> </a> <ul id="toc-External_links-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" title="Table of Contents" > <input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Ethics of artificial intelligence</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. 
vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Challenges related to the responsible development and use of AI</div> <p class="mw-empty-elt"> </p> <style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output .hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" "counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style data-mw-deduplicate="TemplateStyles:r1246091330">.mw-parser-output .sidebar{width:22em;float:right;clear:right;margin:0.5em 0 1em 
1em;background:var(--background-color-neutral-subtle,#f8f9fa);border:1px solid var(--border-color-base,#a2a9b1);padding:0.2em;text-align:center;line-height:1.4em;font-size:88%;border-collapse:collapse;display:table}body.skin-minerva .mw-parser-output .sidebar{display:table!important;float:right!important;margin:0.5em 0 1em 1em!important}.mw-parser-output .sidebar-subgroup{width:100%;margin:0;border-spacing:0}.mw-parser-output .sidebar-left{float:left;clear:left;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-none{float:none;clear:both;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-outer-title{padding:0 0.4em 0.2em;font-size:125%;line-height:1.2em;font-weight:bold}.mw-parser-output .sidebar-top-image{padding:0.4em}.mw-parser-output .sidebar-top-caption,.mw-parser-output .sidebar-pretitle-with-top-image,.mw-parser-output .sidebar-caption{padding:0.2em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-pretitle{padding:0.4em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-title,.mw-parser-output .sidebar-title-with-pretitle{padding:0.2em 0.8em;font-size:145%;line-height:1.2em}.mw-parser-output .sidebar-title-with-pretitle{padding:0.1em 0.4em}.mw-parser-output .sidebar-image{padding:0.2em 0.4em 0.4em}.mw-parser-output .sidebar-heading{padding:0.1em 0.4em}.mw-parser-output .sidebar-content{padding:0 0.5em 0.4em}.mw-parser-output .sidebar-content-with-subgroup{padding:0.1em 0.4em 0.2em}.mw-parser-output .sidebar-above,.mw-parser-output .sidebar-below{padding:0.3em 0.8em;font-weight:bold}.mw-parser-output .sidebar-collapse .sidebar-above,.mw-parser-output .sidebar-collapse .sidebar-below{border-top:1px solid #aaa;border-bottom:1px solid #aaa}.mw-parser-output .sidebar-navbar{text-align:right;font-size:115%;padding:0 0.4em 0.4em}.mw-parser-output .sidebar-list-title{padding:0 0.4em;text-align:left;font-weight:bold;line-height:1.6em;font-size:105%}.mw-parser-output .sidebar-list-title-c{padding:0 0.4em;text-align:center;margin:0 3.3em}@media(max-width:640px){body.mediawiki .mw-parser-output .sidebar{width:100%!important;clear:both;float:none!important;margin-left:0!important;margin-right:0!important}}body.skin--responsive .mw-parser-output .sidebar a>img{max-width:none!important}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media print{body.ns-0 .mw-parser-output .sidebar{display:none!important}}</style><table class="sidebar sidebar-collapse nomobile nowraplinks hlist"><tbody><tr><td class="sidebar-pretitle">Part of a series on</td></tr><tr><th class="sidebar-title-with-pretitle"><a href="/wiki/Artificial_intelligence" title="Artificial intelligence">Artificial intelligence (AI)</a></th></tr><tr><td class="sidebar-image"><figure class="mw-halign-center" typeof="mw:File"><a 
href="/wiki/File:Dall-e_3_(jan_%2724)_artificial_intelligence_icon.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/100px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png" decoding="async" width="100" height="100" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/150px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/200px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png 2x" data-file-width="820" data-file-height="820" /></a><figcaption></figcaption></figure></td></tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Artificial_intelligence#Goals" title="Artificial intelligence">Major goals</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Artificial_general_intelligence" title="Artificial general intelligence">Artificial general intelligence</a></li> <li><a href="/wiki/Intelligent_agent" title="Intelligent agent">Intelligent agent</a></li> <li><a href="/wiki/Recursive_self-improvement" title="Recursive self-improvement">Recursive self-improvement</a></li> <li><a href="/wiki/Automated_planning_and_scheduling" title="Automated planning and scheduling">Planning</a></li> <li><a href="/wiki/Computer_vision" title="Computer vision">Computer vision</a></li> <li><a href="/wiki/General_game_playing" title="General game playing">General game playing</a></li> <li><a href="/wiki/Knowledge_representation_and_reasoning" title="Knowledge representation and reasoning">Knowledge reasoning</a></li> <li><a href="/wiki/Natural_language_processing" title="Natural language processing">Natural language processing</a></li> <li><a href="/wiki/Robotics" title="Robotics">Robotics</a></li> <li><a href="/wiki/AI_safety" title="AI safety">AI safety</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)">Approaches</div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Machine_learning" title="Machine learning">Machine learning</a></li> <li><a href="/wiki/Symbolic_artificial_intelligence" title="Symbolic artificial intelligence">Symbolic</a></li> <li><a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a></li> <li><a href="/wiki/Bayesian_network" title="Bayesian network">Bayesian networks</a></li> <li><a href="/wiki/Evolutionary_algorithm" title="Evolutionary algorithm">Evolutionary algorithms</a></li> <li><a href="/wiki/Hybrid_intelligent_system" title="Hybrid intelligent system">Hybrid intelligent systems</a></li> <li><a href="/wiki/Artificial_intelligence_systems_integration" title="Artificial intelligence systems integration">Systems integration</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Applications_of_artificial_intelligence" title="Applications of artificial intelligence">Applications</a></div><div class="sidebar-list-content 
mw-collapsible-content"> <ul><li><a href="/wiki/Machine_learning_in_bioinformatics" title="Machine learning in bioinformatics">Bioinformatics</a></li> <li><a href="/wiki/Deepfake" title="Deepfake">Deepfake</a></li> <li><a href="/wiki/Machine_learning_in_earth_sciences" title="Machine learning in earth sciences">Earth sciences</a></li> <li><a href="/wiki/Applications_of_artificial_intelligence#Finance" title="Applications of artificial intelligence"> Finance </a></li> <li><a href="/wiki/Generative_artificial_intelligence" title="Generative artificial intelligence">Generative AI</a> <ul><li><a href="/wiki/Artificial_intelligence_art" title="Artificial intelligence art">Art</a></li> <li><a href="/wiki/Generative_audio" title="Generative audio">Audio</a></li> <li><a href="/wiki/Music_and_artificial_intelligence" title="Music and artificial intelligence">Music</a></li></ul></li> <li><a href="/wiki/Artificial_intelligence_in_government" title="Artificial intelligence in government">Government</a></li> <li><a href="/wiki/Artificial_intelligence_in_healthcare" title="Artificial intelligence in healthcare">Healthcare</a> <ul><li><a href="/wiki/Artificial_intelligence_in_mental_health" title="Artificial intelligence in mental health">Mental health</a></li></ul></li> <li><a href="/wiki/Artificial_intelligence_in_industry" title="Artificial intelligence in industry">Industry</a></li> <li><a href="/wiki/Machine_translation" title="Machine translation">Translation</a></li> <li><a href="/wiki/Artificial_intelligence_arms_race" title="Artificial intelligence arms race"> Military </a></li> <li><a href="/wiki/Machine_learning_in_physics" title="Machine learning in physics">Physics</a></li> <li><a href="/wiki/List_of_artificial_intelligence_projects" title="List of artificial intelligence projects">Projects</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Philosophy_of_artificial_intelligence" title="Philosophy of artificial intelligence">Philosophy</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Artificial_consciousness" title="Artificial consciousness">Artificial consciousness</a></li> <li><a href="/wiki/Chinese_room" title="Chinese room">Chinese room</a></li> <li><a href="/wiki/Friendly_artificial_intelligence" title="Friendly artificial intelligence">Friendly AI</a></li> <li><a href="/wiki/AI_control_problem" class="mw-redirect" title="AI control problem">Control problem</a>/<a href="/wiki/AI_takeover" title="AI takeover">Takeover</a></li> <li><a class="mw-selflink selflink">Ethics</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk</a></li> <li><a href="/wiki/Turing_test" title="Turing test">Turing test</a></li> <li><a href="/wiki/Uncanny_valley" title="Uncanny valley">Uncanny valley</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/History_of_artificial_intelligence" title="History of artificial intelligence">History</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Timeline_of_artificial_intelligence" title="Timeline of artificial intelligence">Timeline</a></li> <li><a 
href="/wiki/Progress_in_artificial_intelligence" title="Progress in artificial intelligence">Progress</a></li> <li><a href="/wiki/AI_winter" title="AI winter">AI winter</a></li> <li><a href="/wiki/AI_boom" title="AI boom">AI boom</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)">Glossary</div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Glossary_of_artificial_intelligence" title="Glossary of artificial intelligence">Glossary</a></li></ul></div></div></td> </tr><tr><td class="sidebar-navbar"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:"[ "}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:" ]"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}html.skin-theme-clientpref-night .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}}@media print{.mw-parser-output .navbar{display:none!important}}</style><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Artificial_intelligence" title="Template:Artificial intelligence"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Artificial_intelligence" title="Template talk:Artificial intelligence"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Artificial_intelligence" title="Special:EditPage/Template:Artificial intelligence"><abbr title="Edit this template">e</abbr></a></li></ul></div></td></tr></tbody></table> <p>The <a href="/wiki/Ethics" title="Ethics">ethics</a> of <a href="/wiki/Artificial_intelligence" title="Artificial intelligence">artificial intelligence</a> covers a broad range of topics within the field that are considered to have particular ethical stakes.<sup id="cite_ref-Muller-2020_1-0" class="reference"><a href="#cite_note-Muller-2020-1"><span class="cite-bracket">&#91;</span>1<span class="cite-bracket">&#93;</span></a></sup> This includes <a href="/wiki/Algorithmic_bias" title="Algorithmic bias">algorithmic biases</a>, <a href="/wiki/Fairness_(machine_learning)" title="Fairness (machine learning)">fairness</a>,<sup id="cite_ref-2" class="reference"><a href="#cite_note-2"><span class="cite-bracket">&#91;</span>2<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Automated_decision-making" title="Automated decision-making">automated decision-making</a>, <a href="/wiki/Accountability" title="Accountability">accountability</a>, <a href="/wiki/Privacy" 
title="Privacy">privacy</a>, and <a href="/wiki/Regulation_of_artificial_intelligence" title="Regulation of artificial intelligence">regulation</a>. It also covers various emerging or potential future challenges such as <a href="/wiki/Machine_ethics" title="Machine ethics">machine ethics</a> (how to make machines that behave ethically), <a href="/wiki/Lethal_autonomous_weapon" title="Lethal autonomous weapon">lethal autonomous weapon systems</a>, <a href="/wiki/Artificial_intelligence_arms_race" title="Artificial intelligence arms race">arms race</a> dynamics, <a href="/wiki/AI_safety" title="AI safety">AI safety</a> and <a href="/wiki/AI_alignment" title="AI alignment">alignment</a>, <a href="/wiki/Technological_unemployment" title="Technological unemployment">technological unemployment</a>, AI-enabled <a href="/wiki/Misinformation" title="Misinformation">misinformation</a>, how to treat certain AI systems if they have a <a href="/wiki/Moral_status" class="mw-redirect" title="Moral status">moral status</a> (AI welfare and rights), <a href="/wiki/Artificial_superintelligence" class="mw-redirect" title="Artificial superintelligence">artificial superintelligence</a> and <a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">existential risks</a>.<sup id="cite_ref-Muller-2020_1-1" class="reference"><a href="#cite_note-Muller-2020-1"><span class="cite-bracket">&#91;</span>1<span class="cite-bracket">&#93;</span></a></sup> </p><p>Some application areas may also have particularly important ethical implications, like <a href="/wiki/Artificial_intelligence_in_healthcare" title="Artificial intelligence in healthcare">healthcare</a>, education, criminal justice, or the military. 
</p> <meta property="mw:PageProp/toc" /> <div class="mw-heading mw-heading2"><h2 id="Machine_ethics">Machine ethics</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=1" title="Edit section: Machine ethics"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1236090951">.mw-parser-output .hatnote{font-style:italic}.mw-parser-output div.hatnote{padding-left:1.6em;margin-bottom:0.5em}.mw-parser-output .hatnote i{font-style:normal}.mw-parser-output .hatnote+link+.hatnote{margin-top:-0.5em}@media print{body.ns-0 .mw-parser-output .hatnote{display:none!important}}</style><div role="note" class="hatnote navigation-not-searchable">Main articles: <a href="/wiki/Machine_ethics" title="Machine ethics">Machine ethics</a> and <a href="/wiki/AI_alignment" title="AI alignment">AI alignment</a></div> <p>Machine ethics (or machine morality) is the field of research concerned with designing <a href="/wiki/Moral_agency#Artificial_Moral_Agents" title="Moral agency">Artificial Moral Agents</a> (AMAs), robots or artificially intelligent computers that behave morally or as though moral.<sup id="cite_ref-Andersonweb_3-0" class="reference"><a href="#cite_note-Andersonweb-3"><span class="cite-bracket">&#91;</span>3<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-Anderson2011_4-0" class="reference"><a href="#cite_note-Anderson2011-4"><span class="cite-bracket">&#91;</span>4<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-Anderson2006_5-0" class="reference"><a href="#cite_note-Anderson2006-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-Anderson2007_6-0" class="reference"><a href="#cite_note-Anderson2007-6"><span class="cite-bracket">&#91;</span>6<span class="cite-bracket">&#93;</span></a></sup> To account for the nature of these agents, it has been suggested to consider certain philosophical ideas, like the standard characterizations of <a href="/wiki/Agency_(philosophy)" title="Agency (philosophy)">agency</a>, <a href="/wiki/Rational_agent" title="Rational agent">rational agency</a>, <a href="/wiki/Moral_agency" title="Moral agency">moral agency</a>, and artificial agency, which are related to the concept of AMAs.<sup id="cite_ref-7" class="reference"><a href="#cite_note-7"><span class="cite-bracket">&#91;</span>7<span class="cite-bracket">&#93;</span></a></sup> </p><p>There are discussions on creating tests to see if an AI is capable of making <a href="/wiki/Ethical_decision" class="mw-redirect" title="Ethical decision">ethical decisions</a>. 
<a href="/wiki/Alan_Winfield" title="Alan Winfield">Alan Winfield</a> concludes that the <a href="/wiki/Turing_test" title="Turing test">Turing test</a> is flawed and the requirement for an AI to pass the test is too low.<sup id="cite_ref-Winfield-2019_8-0" class="reference"><a href="#cite_note-Winfield-2019-8"><span class="cite-bracket">&#91;</span>8<span class="cite-bracket">&#93;</span></a></sup> A proposed alternative test is one called the Ethical Turing Test, which would improve on the current test by having multiple judges decide if the AI's decision is ethical or unethical.<sup id="cite_ref-Winfield-2019_8-1" class="reference"><a href="#cite_note-Winfield-2019-8"><span class="cite-bracket">&#91;</span>8<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Neuromorphic_engineering" class="mw-redirect" title="Neuromorphic engineering">Neuromorphic</a> AI could be one way to create morally capable robots, as it aims to process information similarly to humans, nonlinearly and with millions of interconnected artificial neurons.<sup id="cite_ref-9" class="reference"><a href="#cite_note-9"><span class="cite-bracket">&#91;</span>9<span class="cite-bracket">&#93;</span></a></sup> Similarly, <a href="/wiki/Whole-brain_emulation" class="mw-redirect" title="Whole-brain emulation">whole-brain emulation</a> (scanning a brain and simulating it on digital hardware) could also in principle lead to human-like robots, thus capable of moral actions.<sup id="cite_ref-10" class="reference"><a href="#cite_note-10"><span class="cite-bracket">&#91;</span>10<span class="cite-bracket">&#93;</span></a></sup> And <a href="/wiki/Large_language_model" title="Large language model">large language models</a> are capable of approximating human moral judgments.<sup id="cite_ref-11" class="reference"><a href="#cite_note-11"><span class="cite-bracket">&#91;</span>11<span class="cite-bracket">&#93;</span></a></sup> Inevitably, this raises the question of the environment in which such robots would learn about the world and whose morality they would inherit – or if they end up developing human 'weaknesses' as well: selfishness, pro-survival attitudes, inconsistency, scale insensitivity, etc. </p><p>In <i>Moral Machines: Teaching Robots Right from Wrong</i>,<sup id="cite_ref-Wallach2008_12-0" class="reference"><a href="#cite_note-Wallach2008-12"><span class="cite-bracket">&#91;</span>12<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Wendell_Wallach" title="Wendell Wallach">Wendell Wallach</a> and Colin Allen conclude that attempts to teach robots right from wrong will likely advance understanding of human ethics by motivating humans to address gaps in modern <a href="/wiki/Normative_ethics" title="Normative ethics">normative theory</a> and by providing a platform for experimental investigation. As one example, it has introduced normative ethicists to the controversial issue of which specific <a href="/wiki/List_of_machine_learning_algorithms" class="mw-redirect" title="List of machine learning algorithms">learning algorithms</a> to use in machines. 
Robot ethics

Main article: Robot ethics

The term "robot ethics" (sometimes "roboethics") refers to the morality of how humans design, construct, use and treat robots.[15] Robot ethics intersects with the ethics of AI. Robots are physical machines, whereas AI can be software only.[16] Not all robots function through AI systems, and not all AI systems are robots. Robot ethics considers how machines may be used to harm or benefit humans, their impact on individual autonomy, and their effects on social justice.
</p> <div class="mw-heading mw-heading3"><h3 id="Ethical_principles">Ethical principles</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=3" title="Edit section: Ethical principles"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In the review of 84<sup id="cite_ref-Jobin-2020_17-0" class="reference"><a href="#cite_note-Jobin-2020-17"><span class="cite-bracket">&#91;</span>17<span class="cite-bracket">&#93;</span></a></sup> ethics guidelines for AI, 11 clusters of principles were found: transparency, justice and fairness, non-maleficence, responsibility, privacy, <a href="/wiki/Beneficence_(ethics)" title="Beneficence (ethics)">beneficence</a>, freedom and autonomy, trust, sustainability, dignity, and <a href="/wiki/Solidarity" title="Solidarity">solidarity</a>.<sup id="cite_ref-Jobin-2020_17-1" class="reference"><a href="#cite_note-Jobin-2020-17"><span class="cite-bracket">&#91;</span>17<span class="cite-bracket">&#93;</span></a></sup> </p><p><a href="/wiki/Luciano_Floridi" title="Luciano Floridi">Luciano Floridi</a> and Josh Cowls created an ethical framework of AI principles set by four principles of <a href="/wiki/Bioethics" title="Bioethics">bioethics</a> (<a href="/wiki/Beneficence_(ethics)" title="Beneficence (ethics)">beneficence</a>, <a href="/wiki/Non-maleficence" class="mw-redirect" title="Non-maleficence">non-maleficence</a>, <a href="/wiki/Autonomy" title="Autonomy">autonomy</a> and <a href="/wiki/Justice" title="Justice">justice</a>) and an additional AI enabling principle – explicability.<sup id="cite_ref-18" class="reference"><a href="#cite_note-18"><span class="cite-bracket">&#91;</span>18<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Current_challenges">Current challenges</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=4" title="Edit section: Current challenges"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading3"><h3 id="Algorithmic_biases">Algorithmic biases</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=5" title="Edit section: Algorithmic biases"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Algorithmic_bias" title="Algorithmic bias">Algorithmic bias</a></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><span><video id="mwe_player_0" poster="//upload.wikimedia.org/wikipedia/commons/thumb/5/5b/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv/220px--Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv.jpg" controls="" preload="none" data-mw-tmh="" class="mw-file-element" width="220" height="220" data-durationhint="56" data-mwtitle="Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv" data-mwprovider="wikimediacommons" resource="/wiki/File:Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv"><source 
src="//upload.wikimedia.org/wikipedia/commons/transcoded/5/5b/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv.480p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-transcodekey="480p.vp9.webm" data-width="480" data-height="480" /><source src="//upload.wikimedia.org/wikipedia/commons/5/5b/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv" type="video/ogg; codecs=&quot;theora, vorbis&quot;" data-width="720" data-height="720" /><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/5/5b/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv.144p.mjpeg.mov" type="video/quicktime" data-transcodekey="144p.mjpeg.mov" data-width="144" data-height="144" /><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/5/5b/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv.240p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-transcodekey="240p.vp9.webm" data-width="240" data-height="240" /><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/5/5b/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv.360p.vp9.webm" type="video/webm; codecs=&quot;vp9, opus&quot;" data-transcodekey="360p.vp9.webm" data-width="360" data-height="360" /><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/5/5b/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv/Kamala_Harris_speaks_about_racial_bias_in_artificial_intelligence_-_2020-04-23.ogv.360p.webm" type="video/webm; codecs=&quot;vp8, vorbis&quot;" data-transcodekey="360p.webm" data-width="360" data-height="360" /></video></span><figcaption><a href="/wiki/Kamala_Harris" title="Kamala Harris">Kamala Harris</a> speaking about racial bias in artificial intelligence in 2020</figcaption></figure><p>AI has become increasingly inherent in facial and <a href="/wiki/Speech_recognition" title="Speech recognition">voice recognition</a> systems. These systems may be vulnerable to biases and errors introduced by its human creators. 
Notably, the data used to train them can itself be biased.[19][20][21][22] For instance, facial recognition algorithms made by Microsoft, IBM and Face++ all had biases when it came to detecting people's gender;[23] these AI systems were able to detect the gender of white men more accurately than the gender of men with darker skin. Further, a 2020 study that reviewed voice recognition systems from Amazon, Apple, Google, IBM, and Microsoft found that they had higher error rates when transcribing black people's voices than white people's.[24]

The most predominant view on how bias is introduced into AI systems is that it is embedded within the historical data used to train the system.[25] For instance, Amazon terminated its use of AI hiring and recruitment because the algorithm favored male candidates over female ones. This was because Amazon's system was trained with data collected over a 10-year period that came mostly from male candidates. The algorithm learned the biased pattern from this historical data and generated predictions that such candidates were the most likely to succeed in getting the job. Therefore, the recruitment decisions made by the AI system turned out to be biased against female and minority candidates.[26] Friedman and Nissenbaum identify three categories of bias in computer systems: existing bias, technical bias, and emergent bias.[27] In natural language processing, problems can arise from the text corpus, the source material the algorithm uses to learn about the relationships between different words.[28]
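The mechanism described above, bias inherited from past human decisions recorded in training labels, can be reproduced in a few lines. The sketch below is a toy simulation with invented data, not a reconstruction of Amazon's system: a standard classifier trained on simulated historical hiring decisions learns the penalty baked into those decisions.

```python
# Toy sketch: a model inherits bias from historical labels, not from
# the learning algorithm itself. All data is synthetic and illustrative.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n = 5000
skill = rng.normal(size=n)            # true qualification signal
group = rng.integers(0, 2, size=n)    # 0 or 1; an illustrative group label

# Simulated past human decisions: hiring tracked skill but historically
# penalized one group.
hired = (skill - 0.8 * group + rng.normal(scale=0.5, size=n)) > 0

X = np.column_stack([skill, group])
model = LogisticRegression().fit(X, hired)

# The learned coefficient on the group column comes out strongly
# negative: the model faithfully reproduces the historical penalty.
print("coef on skill: %.2f, coef on group: %.2f" % tuple(model.coef_[0]))
```

Note that simply dropping the group column does not necessarily help, since correlated proxy features (such as the residential-area example discussed later) can carry the same signal.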
Large companies such as IBM and Google, which provide significant funding for research and development,[29] have made efforts to research and address these biases.[30][31][32] One potential solution is to create documentation for the data used to train AI systems;[33][34] a sketch of what such documentation might look like follows below. Process mining can be an important tool for organizations to achieve compliance with proposed AI regulations by identifying errors, monitoring processes, identifying potential root causes for improper execution, and other functions.[35]

The problem of bias in machine learning is likely to become more significant as the technology spreads to critical areas like medicine and law, and as more people without a deep technical understanding are tasked with deploying it.[36] Some open-source tools aim to bring more awareness to AI biases.[37] However, there are also limitations to the current landscape of fairness in AI, due to the intrinsic ambiguities in the concept of discrimination, at both the philosophical and legal level.[38][39][40]
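In machine-readable form, the training-data documentation mentioned above might look like the sketch below, loosely in the spirit of the "datasheets for datasets" proposal; the schema and field values are invented for illustration, not a standard.

```python
# Illustrative sketch of machine-readable training-data documentation,
# loosely in the spirit of "datasheets for datasets". The schema and
# all field values are invented, not a standard. Requires Python 3.9+.
from dataclasses import dataclass, field

@dataclass
class Datasheet:
    name: str
    collection_period: str
    collection_method: str
    known_skews: list[str] = field(default_factory=list)
    intended_uses: list[str] = field(default_factory=list)
    prohibited_uses: list[str] = field(default_factory=list)

resume_corpus = Datasheet(
    name="example-resume-corpus-v1",
    collection_period="2004-2014",
    collection_method="resumes submitted for engineering roles",
    known_skews=["applicant pool ~80% male; labels mirror past hiring"],
    intended_uses=["ranking research accompanied by bias audits"],
    prohibited_uses=["automated screening without human review"],
)
```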
Facial recognition was shown to be biased against those with darker skin tones. AI systems may be less accurate for black people, as was the case in the development of an AI-based pulse oximeter that overestimated blood oxygen levels in patients with darker skin, causing issues with their hypoxia treatment.[41] Oftentimes the systems are able to easily detect the faces of white people while being unable to register the faces of people who are black. This has led to bans on police usage of AI materials or software in some U.S. states. In the justice system, AI has been shown to have biases against black people, labeling black court participants as high risk at a much higher rate than white participants. AI also often struggles to determine when racial slurs need to be censored: it has difficulty telling when a word is being used as a slur and when it is being used culturally.[42] The reason for these biases is that AI pulls information from across the internet to influence its responses in each situation. For example, if a facial recognition system were tested only on white people, it would find it much harder to interpret the facial structure and tones of other races and ethnicities. Biases often stem from the training data rather than the algorithm itself, notably when the data represents past human decisions.[43]

Injustice in the use of AI is much harder to eliminate within healthcare systems, as diseases and conditions often affect different races and genders differently. This can lead to confusion, as the AI may be making decisions based on statistics showing that one patient is more likely to have problems due to their gender or race.[44] This can be perceived as a bias because each patient is a different case, and the AI is making decisions based on the group it is programmed to place that individual into. This leads to a discussion about what should count as a biased decision in the distribution of treatment. While it is known that there are differences in how diseases and injuries affect different genders and races, there is a discussion on whether it is fairer to incorporate this into healthcare treatments, or to examine each patient without this knowledge. In modern society there are certain tests for diseases, such as breast cancer, that are recommended to certain groups of people over others because they are more likely to contract the disease in question.
If AI implements these statistics and applies them to each patient, it could be considered biased.[45]

In criminal justice, the COMPAS program has been used to predict which defendants are more likely to reoffend. While COMPAS is calibrated, giving similar predictive accuracy across racial groups, black defendants were almost twice as likely as white defendants to be falsely flagged as "high-risk" and half as likely to be falsely flagged as "low-risk".[46] Another example is Google's ads, which targeted men with higher-paying jobs and women with lower-paying jobs. It can be hard to detect AI biases within an algorithm, as the bias is often not linked to the actual words associated with it. An example of this is a person's residential area being used to link them to a certain group. This can lead to problems, as businesses can often avoid legal action through this loophole, because of the specific laws regarding the verbiage that governments enforcing these policies consider discriminatory.[47]
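The COMPAS finding above reflects an arithmetic constraint rather than a quirk of one product: when two groups reoffend at different base rates, a score that is equally precise for both must yield unequal false-positive rates. The toy calculation below uses invented confusion-matrix counts to show the pattern.

```python
# Toy illustration of the fairness-metric tension behind the COMPAS
# debate. All confusion-matrix counts are invented.

def rates(tp, fp, fn, tn):
    ppv = tp / (tp + fp)   # precision: how often a "high-risk" flag is right
    fpr = fp / (fp + tn)   # share of non-reoffenders wrongly flagged
    return ppv, fpr

# Group A: 50% base rate of reoffense; Group B: 20%.
print(rates(tp=400, fp=100, fn=100, tn=400))  # -> (0.8, 0.2)
print(rates(tp=160, fp=40, fn=40, tn=760))    # -> (0.8, 0.05)

# Precision is identical (0.8 vs 0.8), yet non-reoffenders in group A
# are wrongly flagged four times as often as those in group B.
```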
(January 2024)">better&#160;source&#160;needed</span></a></i>&#93;</sup><sup id="cite_ref-Luo-2023_48-0" class="reference"><a href="#cite_note-Luo-2023-48"><span class="cite-bracket">&#91;</span>48<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Gender_bias">Gender bias</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=7" title="Edit section: Gender bias"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Large language models often reinforces <a href="/wiki/Gender_stereotypes" class="mw-redirect" title="Gender stereotypes">gender stereotypes</a>, assigning roles and characteristics based on traditional gender norms. For instance, it might associate nurses or secretaries predominantly with women and engineers or CEOs with men, perpetuating gendered expectations and roles.<sup id="cite_ref-49" class="reference"><a href="#cite_note-49"><span class="cite-bracket">&#91;</span>49<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-50" class="reference"><a href="#cite_note-50"><span class="cite-bracket">&#91;</span>50<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-51" class="reference"><a href="#cite_note-51"><span class="cite-bracket">&#91;</span>51<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Political_bias">Political bias</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=8" title="Edit section: Political bias"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Language models may also exhibit political biases. Since the training data includes a wide range of political opinions and coverage, the models might generate responses that lean towards particular political ideologies or viewpoints, depending on the prevalence of those views in the data.<sup id="cite_ref-52" class="reference"><a href="#cite_note-52"><span class="cite-bracket">&#91;</span>52<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-53" class="reference"><a href="#cite_note-53"><span class="cite-bracket">&#91;</span>53<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Stereotyping">Stereotyping</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=9" title="Edit section: Stereotyping"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Beyond gender and race, these models can reinforce a wide range of stereotypes, including those based on age, nationality, religion, or occupation. 
Political bias

Language models may also exhibit political biases. Since the training data includes a wide range of political opinions and coverage, the models might generate responses that lean towards particular political ideologies or viewpoints, depending on the prevalence of those views in the data.[52][53]

Stereotyping

Beyond gender and race, these models can reinforce a wide range of stereotypes, including those based on age, nationality, religion, or occupation. This can lead to outputs that unfairly generalize or caricature groups of people, sometimes in harmful or derogatory ways.[54]

Dominance by tech giants

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[55][56][57] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench themselves further in the marketplace.[58][59]

Open-source

Bill Hibbard argues that because AI will have such a profound effect on humanity, AI developers are representatives of future humanity and thus have an ethical obligation to be transparent in their efforts.[60] Organizations like Hugging Face[61] and EleutherAI[62] have been actively open-sourcing AI software.
Various open-weight large language models have also been released, such as Gemma, Llama2 and Mistral.[63]

However, making code open source does not make it comprehensible, which by many definitions means that the AI code is not transparent. The IEEE Standards Association has published a technical standard on transparency of autonomous systems, IEEE 7001-2021, which identifies multiple scales of transparency for different stakeholders.[64]

There are also concerns that releasing AI models may lead to misuse.[65] For example, Microsoft has expressed concern about allowing universal access to its face recognition software, even for those who can pay for it, and has published a blog post asking for government regulation to help determine the right thing to do.[66] Furthermore, open-weight AI models can be fine-tuned to remove any countermeasures until the model complies with dangerous requests, without any filtering. This could be particularly concerning for future AI models, for example if they gain the ability to create bioweapons or to automate cyberattacks.[67] OpenAI, initially committed to an open-source approach to the development of artificial general intelligence (AGI), eventually switched to a closed-source approach, citing competitiveness and safety reasons.
<a href="/wiki/Ilya_Sutskever" title="Ilya Sutskever">Ilya Sutskever</a>, OpenAI's former chief AGI scientist, said in 2023 "we were wrong", expecting that the safety reasons for not open-sourcing the most potent AI models will become "obvious" in a few years.<sup id="cite_ref-68" class="reference"><a href="#cite_note-68"><span class="cite-bracket">&#91;</span>68<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Transparency">Transparency</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=12" title="Edit section: Transparency"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Approaches like machine learning with <a href="/wiki/Neural_network" title="Neural network">neural networks</a> can result in computers making decisions that neither they nor their developers can explain. It is difficult for people to determine if such decisions are fair and trustworthy, leading potentially to bias in AI systems going undetected, or people rejecting the use of such systems. This has led to advocacy and in some jurisdictions legal requirements for <a href="/wiki/Explainable_artificial_intelligence" title="Explainable artificial intelligence">explainable artificial intelligence</a>.<sup id="cite_ref-69" class="reference"><a href="#cite_note-69"><span class="cite-bracket">&#91;</span>69<span class="cite-bracket">&#93;</span></a></sup> Explainable artificial intelligence encompasses both explainability and interpretability, with explainability relating to summarizing neural network behavior and building user confidence, while interpretability is defined as the comprehension of what a model has done or could do.<sup id="cite_ref-70" class="reference"><a href="#cite_note-70"><span class="cite-bracket">&#91;</span>70<span class="cite-bracket">&#93;</span></a></sup> </p><p>In healthcare, the use of complex AI methods or techniques often results in models described as "<a href="/wiki/Black_box" title="Black box">black-boxes</a>" due to the difficulty to understand how they work. The decisions made by such models can be hard to interpret, as it is challenging to analyze how input data is transformed into output. 
This lack of transparency is a significant concern in fields like healthcare, where understanding the rationale behind decisions can be crucial for trust, ethical considerations, and compliance with regulatory standards.<sup id="cite_ref-71" class="reference"><a href="#cite_note-71"><span class="cite-bracket">&#91;</span>71<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Accountability">Accountability</h3></div> <p>A special case of the opaqueness of AI is that caused by it being <a href="/wiki/Anthropomorphised" class="mw-redirect" title="Anthropomorphised">anthropomorphised</a>, that is, assumed to have human-like characteristics, resulting in misplaced conceptions of its <a href="/wiki/Moral_agency" title="Moral agency">moral agency</a>.<sup class="noprint Inline-Template" style="white-space:nowrap;">&#91;<i><a href="/wiki/Wikipedia:Accuracy_dispute#Disputed_statement" title="Wikipedia:Accuracy dispute"><span title="Unclear why AIs couldn&#39;t have moral agency. Also unclear whether attributing it moral agency is a special case of opaqueness, and whether that would prevent people from attributing the responsibility of incidents to the company that developed it. (April 2024)">dubious</span></a>&#32;&#8211; <a href="/wiki/Talk:Ethics_of_artificial_intelligence#Dubious" title="Talk:Ethics of artificial intelligence">discuss</a></i>&#93;</sup> This can cause people to overlook whether human <a href="/wiki/Negligence" title="Negligence">negligence</a> or deliberate criminal action has led to unethical outcomes produced through an AI system. Some recent <a href="/wiki/Digital_governance" class="mw-redirect" title="Digital governance">digital governance</a> regulation, such as the <a href="/wiki/EU" class="mw-redirect" title="EU">EU</a>'s <a href="/wiki/AI_Act" class="mw-redirect" title="AI Act">AI Act</a>, sets out to rectify this by ensuring that AI systems are treated with at least as much care as one would expect under ordinary <a href="/wiki/Product_liability" title="Product liability">product liability</a>. This potentially includes <a href="/wiki/Information_technology_audit" title="Information technology audit">AI audits</a>. </p> <div class="mw-heading mw-heading3"><h3 id="Regulation">Regulation</h3></div> <div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Regulation_of_artificial_intelligence" title="Regulation of artificial intelligence">Regulation of artificial intelligence</a></div> <p>According to a 2019 report from the Center for the Governance of AI at the University of Oxford, 82% of Americans believe that robots and AI should be carefully managed.
Concerns cited ranged from how AI is used in surveillance and in spreading fake content online (known as deepfakes when they include doctored video images and audio generated with help from AI) to cyberattacks, infringements on data privacy, hiring bias, autonomous vehicles, and drones that do not require a human controller.<sup id="cite_ref-72" class="reference"><a href="#cite_note-72"><span class="cite-bracket">&#91;</span>72<span class="cite-bracket">&#93;</span></a></sup> Similarly, according to a 2021 five-country study by KPMG and the <a href="/wiki/University_of_Queensland" title="University of Queensland">University of Queensland</a> in Australia, 66–79% of citizens in each country believe that the impact of AI on society is uncertain and unpredictable; 96% of those surveyed expect AI governance challenges to be managed carefully.<sup id="cite_ref-73" class="reference"><a href="#cite_note-73"><span class="cite-bracket">&#91;</span>73<span class="cite-bracket">&#93;</span></a></sup> </p><p>Not only companies but also many researchers and citizen advocates recommend government regulation as a means of ensuring transparency, and through it, human accountability. This strategy has proven controversial, as some worry that it will slow the rate of innovation. Others argue that regulation produces a systemic stability better able to support innovation in the long term.<sup id="cite_ref-DeloitteGDPR_74-0" class="reference"><a href="#cite_note-DeloitteGDPR-74"><span class="cite-bracket">&#91;</span>74<span class="cite-bracket">&#93;</span></a></sup> The <a href="/wiki/OECD" title="OECD">OECD</a>, <a href="/wiki/UN" class="mw-redirect" title="UN">UN</a>, <a href="/wiki/EU" class="mw-redirect" title="EU">EU</a>, and many countries are presently working on strategies for regulating AI and on finding appropriate legal frameworks.<sup id="cite_ref-75" class="reference"><a href="#cite_note-75"><span class="cite-bracket">&#91;</span>75<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-76" class="reference"><a href="#cite_note-76"><span class="cite-bracket">&#91;</span>76<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-77" class="reference"><a href="#cite_note-77"><span class="cite-bracket">&#91;</span>77<span class="cite-bracket">&#93;</span></a></sup> </p><p>On June 26, 2019, the European Commission High-Level Expert Group on Artificial Intelligence (AI HLEG) published its "Policy and investment recommendations for trustworthy Artificial Intelligence".<sup id="cite_ref-78" class="reference"><a href="#cite_note-78"><span class="cite-bracket">&#91;</span>78<span class="cite-bracket">&#93;</span></a></sup> This is the AI HLEG's second deliverable, after the April 2019 publication of the "Ethics Guidelines for Trustworthy AI".
The June AI HLEG recommendations cover four principal subjects: humans and society at large, research and academia, the private sector, and the public sector.<sup id="cite_ref-79" class="reference"><a href="#cite_note-79"><span class="cite-bracket">&#91;</span>79<span class="cite-bracket">&#93;</span></a></sup> The European Commission claims that "HLEG's recommendations reflect an appreciation of both the opportunities for AI technologies to drive economic growth, prosperity and innovation, as well as the potential risks involved" and states that the EU aims to lead on the framing of policies governing AI internationally.<sup id="cite_ref-80" class="reference"><a href="#cite_note-80"><span class="cite-bracket">&#91;</span>80<span class="cite-bracket">&#93;</span></a></sup> To prevent harm, in addition to regulation, organizations deploying AI need to play a central role in creating and deploying trustworthy AI in line with established principles, and to take accountability for mitigating the risks.<sup id="cite_ref-81" class="reference"><a href="#cite_note-81"><span class="cite-bracket">&#91;</span>81<span class="cite-bracket">&#93;</span></a></sup> On 21 April 2021, the European Commission proposed the <a href="/wiki/Artificial_Intelligence_Act" title="Artificial Intelligence Act">Artificial Intelligence Act</a>.<sup id="cite_ref-Financial_Times-2021_82-0" class="reference"><a href="#cite_note-Financial_Times-2021-82"><span class="cite-bracket">&#91;</span>82<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Emergent_or_potential_future_challenges">Emergent or potential future challenges</h2></div> <div class="mw-heading mw-heading3"><h3 id="Increasing_use">Increasing use</h3></div> <p>AI has gradually become more visible throughout the world, from chatbots that seem to have an answer for every homework question to <a href="/wiki/Generative_artificial_intelligence" title="Generative artificial intelligence">generative artificial intelligence</a> that can create an image of whatever one desires. AI has become increasingly common in hiring, from advertisements that target particular candidates to the automated screening of applications. Events such as <a href="/wiki/COVID-19" title="COVID-19">COVID-19</a> only sped up the adoption of AI in the application process, as more people had to apply electronically, and the resulting increase in online applicants made AI-assisted shortlisting of potential employees easier and more efficient. AI has become more prominent as businesses try to keep pace with an ever-expanding internet.
Processing data and making decisions become much easier with the help of AI.<sup id="cite_ref-Spindler-20232_42-1" class="reference"><a href="#cite_note-Spindler-20232-42"><span class="cite-bracket">&#91;</span>42<span class="cite-bracket">&#93;</span></a></sup> As <a href="/wiki/Tensor_Processing_Unit" title="Tensor Processing Unit">tensor processing units</a> (TPUs) and <a href="/wiki/Graphics_processing_unit" title="Graphics processing unit">graphics processing units</a> (GPUs) become more powerful, AI capabilities also increase, pushing companies to adopt them to keep up with the competition. Managing customers' needs and automating many parts of the workplace allow companies to spend less money on employees. </p><p>AI has also seen increased use in criminal justice and healthcare. In medicine, AI is increasingly used to analyze patient data and make predictions about future patients' conditions and possible treatments. Such programs are called <a href="/wiki/Clinical_decision_support_system" title="Clinical decision support system">clinical decision support systems</a> (CDSS). AI's future role in healthcare may extend beyond recommending treatments, for instance prioritizing certain patients over others, raising the possibility of inequalities.<sup id="cite_ref-83" class="reference"><a href="#cite_note-83"><span class="cite-bracket">&#91;</span>83<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Robot_rights">Robot rights</h3></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Hospital_delivery_robot_having_priority_to_elevators.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/3/34/Hospital_delivery_robot_having_priority_to_elevators.jpg/220px-Hospital_delivery_robot_having_priority_to_elevators.jpg" decoding="async" width="220" height="279" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/34/Hospital_delivery_robot_having_priority_to_elevators.jpg/330px-Hospital_delivery_robot_having_priority_to_elevators.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/34/Hospital_delivery_robot_having_priority_to_elevators.jpg/440px-Hospital_delivery_robot_having_priority_to_elevators.jpg 2x" data-file-width="2385" data-file-height="3022" /></a><figcaption>A hospital <a href="/wiki/Delivery_robot" title="Delivery robot">delivery robot</a> in front of elevator doors stating "Robot Has Priority", a situation that may be regarded as <a href="/wiki/Reverse_discrimination" title="Reverse discrimination">reverse discrimination</a> in relation to humans</figcaption></figure> <p>"Robot rights" is the concept that people should have moral obligations towards their machines, akin to <a href="/wiki/Human_rights" title="Human rights">human rights</a> or <a href="/wiki/Animal_rights" title="Animal rights">animal rights</a>.<sup id="cite_ref-84" class="reference"><a href="#cite_note-84"><span class="cite-bracket">&#91;</span>84<span class="cite-bracket">&#93;</span></a></sup> It has been suggested that robot rights (such as a right to exist and perform its own mission) could be linked to robot duty to serve humanity, analogous to linking human
rights with human duties before society.<sup id="cite_ref-85" class="reference"><a href="#cite_note-85"><span class="cite-bracket">&#91;</span>85<span class="cite-bracket">&#93;</span></a></sup> A specific issue to consider is whether copyright ownership may be claimed.<sup id="cite_ref-86" class="reference"><a href="#cite_note-86"><span class="cite-bracket">&#91;</span>86<span class="cite-bracket">&#93;</span></a></sup> The issue has been considered by the <a href="/wiki/Institute_for_the_Future" title="Institute for the Future">Institute for the Future</a><sup id="cite_ref-87" class="reference"><a href="#cite_note-87"><span class="cite-bracket">&#91;</span>87<span class="cite-bracket">&#93;</span></a></sup> and by the <a href="/wiki/Department_of_Trade_and_Industry_(United_Kingdom)" title="Department of Trade and Industry (United Kingdom)">U.K. Department of Trade and Industry</a>.<sup id="cite_ref-TimesOnline_88-0" class="reference"><a href="#cite_note-TimesOnline-88"><span class="cite-bracket">&#91;</span>88<span class="cite-bracket">&#93;</span></a></sup> </p><p>In October 2017, the android <a href="/wiki/Sophia_(robot)" title="Sophia (robot)">Sophia</a> was granted citizenship in <a href="/wiki/Saudi_Arabia" title="Saudi Arabia">Saudi Arabia</a>, though some considered this to be more of a publicity stunt than a meaningful legal recognition.<sup id="cite_ref-89" class="reference"><a href="#cite_note-89"><span class="cite-bracket">&#91;</span>89<span class="cite-bracket">&#93;</span></a></sup> Some saw the gesture as an open denigration of <a href="/wiki/Human_rights" title="Human rights">human rights</a> and the <a href="/wiki/Rule_of_law" title="Rule of law">rule of law</a>.<sup id="cite_ref-90" class="reference"><a href="#cite_note-90"><span class="cite-bracket">&#91;</span>90<span class="cite-bracket">&#93;</span></a></sup> </p><p>The philosophy of <a href="/wiki/Sentientism" title="Sentientism">sentientism</a> grants degrees of moral consideration to all sentient beings, primarily humans and most non-human animals. If artificial or alien intelligences show evidence of being <a href="/wiki/Sentience" title="Sentience">sentient</a>, this philosophy holds that they should be shown compassion and granted rights. </p><p><a href="/wiki/Joanna_Bryson" title="Joanna Bryson">Joanna Bryson</a> has argued that creating AI that requires rights is avoidable, and would in itself be unethical, both as a burden on the AI agents and on human society.<sup id="cite_ref-91" class="reference"><a href="#cite_note-91"><span class="cite-bracket">&#91;</span>91<span class="cite-bracket">&#93;</span></a></sup> Pressure groups seeking to recognise 'robot rights' significantly hinder the establishment of robust international safety regulations.<sup class="noprint Inline-Template Template-Fact" style="white-space:nowrap;">&#91;<i><a href="/wiki/Wikipedia:Citation_needed" title="Wikipedia:Citation needed"><span title="This claim needs references to reliable sources.
(June 2024)">citation needed</span></a></i>&#93;</sup> </p> <div class="mw-heading mw-heading3"><h3 id="AI_welfare">AI welfare</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=18" title="Edit section: AI welfare"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In 2020, professor Shimon Edelman noted that only a small portion of work in the rapidly growing field of AI ethics addressed the possibility of AIs experiencing suffering. This was despite credible theories having outlined possible ways by which AI systems may become conscious, such as the <a href="/wiki/Global_workspace_theory" title="Global workspace theory">global workspace theory</a> or the <a href="/wiki/Integrated_information_theory" title="Integrated information theory">integrated information theory</a>. Edelman notes one exception had been <a href="/wiki/Thomas_Metzinger" title="Thomas Metzinger">Thomas Metzinger</a>, who in 2018 called for a global moratorium on further work that risked creating conscious AIs. The moratorium was to run to 2050 and could be either extended or repealed early, depending on progress in better understanding the risks and how to mitigate them. Metzinger repeated this argument in 2021, highlighting the risk of creating an "<a href="/wiki/Suffering_risks" class="mw-redirect" title="Suffering risks">explosion of artificial suffering</a>", both as an AI might suffer in intense ways that humans could not understand, and as replication processes may see the creation of huge quantities of conscious instances. </p><p>Several labs have openly stated they are trying to create conscious AIs. There have been reports from those with close access to AIs not openly intended to be self aware, that consciousness may already have unintentionally emerged.<sup id="cite_ref-92" class="reference"><a href="#cite_note-92"><span class="cite-bracket">&#91;</span>92<span class="cite-bracket">&#93;</span></a></sup> These include <a href="/wiki/OpenAI" title="OpenAI">OpenAI</a> founder <a href="/wiki/Ilya_Sutskever" title="Ilya Sutskever">Ilya Sutskever</a> in February 2022, when he wrote that today's large neural nets may be "slightly conscious". 
In November 2022, <a href="/wiki/David_Chalmers" title="David Chalmers">David Chalmers</a> argued that it was unlikely that current large language models like <a href="/wiki/GPT-3" title="GPT-3">GPT-3</a> were conscious, but also that he considered there to be a serious possibility that large language models may become conscious in the future.<sup id="cite_ref-93" class="reference"><a href="#cite_note-93"><span class="cite-bracket">&#91;</span>93<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-94" class="reference"><a href="#cite_note-94"><span class="cite-bracket">&#91;</span>94<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-95" class="reference"><a href="#cite_note-95"><span class="cite-bracket">&#91;</span>95<span class="cite-bracket">&#93;</span></a></sup> In the <a href="/wiki/Ethics_of_uncertain_sentience" title="Ethics of uncertain sentience">ethics of uncertain sentience</a>, the <a href="/wiki/Precautionary_principle" title="Precautionary principle">precautionary principle</a> is often invoked.<sup id="cite_ref-96" class="reference"><a href="#cite_note-96"><span class="cite-bracket">&#91;</span>96<span class="cite-bracket">&#93;</span></a></sup> </p><p>According to Carl Shulman and <a href="/wiki/Nick_Bostrom" title="Nick Bostrom">Nick Bostrom</a>, it may be possible to create machines that would be "superhumanly efficient at deriving well-being from resources", called "super-beneficiaries". One reason for this is that digital hardware could enable much faster information processing than biological brains, leading to a faster rate of <a href="/wiki/Subjective_experience" class="mw-redirect" title="Subjective experience">subjective experience</a>. These machines could also be engineered to feel intense and positive subjective experience, unaffected by the <a href="/wiki/Hedonic_treadmill" title="Hedonic treadmill">hedonic treadmill</a>.
Shulman and Bostrom caution that failing to appropriately consider the moral claims of digital minds could lead to a moral catastrophe, while uncritically prioritizing them over human interests could be detrimental to humanity.<sup id="cite_ref-97" class="reference"><a href="#cite_note-97"><span class="cite-bracket">&#91;</span>97<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-98" class="reference"><a href="#cite_note-98"><span class="cite-bracket">&#91;</span>98<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Threat_to_human_dignity">Threat to human dignity</h3></div> <div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Computer_Power_and_Human_Reason" title="Computer Power and Human Reason">Computer Power and Human Reason</a></div> <p><a href="/wiki/Joseph_Weizenbaum" title="Joseph Weizenbaum">Joseph Weizenbaum</a><sup id="cite_ref-Weizenbaum&#39;s_critique_99-0" class="reference"><a href="#cite_note-Weizenbaum&#39;s_critique-99"><span class="cite-bracket">&#91;</span>99<span class="cite-bracket">&#93;</span></a></sup> argued in 1976 that AI technology should not be used to replace people in positions that require respect and care, such as: </p> <ul><li>A customer service representative (AI technology is already used today for telephone-based <a href="/wiki/Interactive_voice_response" title="Interactive voice response">interactive voice response</a> systems)</li> <li>A nursemaid for the elderly (as was reported by <a href="/wiki/Pamela_McCorduck" title="Pamela McCorduck">Pamela McCorduck</a> in her book <i>The Fifth Generation</i>)</li> <li>A soldier</li> <li>A judge</li> <li>A police officer</li> <li>A therapist (as was proposed by <a href="/wiki/Kenneth_Colby" title="Kenneth Colby">Kenneth Colby</a> in the 1970s)</li></ul> <p>Weizenbaum explains that we require authentic feelings of <a href="/wiki/Empathy" title="Empathy">empathy</a> from people in these positions. If machines replace them, we will find ourselves alienated, devalued and frustrated, for the artificially intelligent system would not be able to simulate empathy. Artificial intelligence, if used in this way, represents a threat to human dignity.
Weizenbaum argues that the fact that we are entertaining the possibility of machines in these positions suggests that we have experienced an "atrophy of the human spirit that comes from thinking of ourselves as computers."<sup id="cite_ref-MWZ_100-0" class="reference"><a href="#cite_note-MWZ-100"><span class="cite-bracket">&#91;</span>100<span class="cite-bracket">&#93;</span></a></sup> </p><p><a href="/wiki/Pamela_McCorduck" title="Pamela McCorduck">Pamela McCorduck</a> counters that, speaking for women and minorities, "I'd rather take my chances with an impartial computer", pointing out that there are conditions in which we would prefer to have automated judges and police that have no personal agenda at all.<sup id="cite_ref-MWZ_100-1" class="reference"><a href="#cite_note-MWZ-100"><span class="cite-bracket">&#91;</span>100<span class="cite-bracket">&#93;</span></a></sup> However, <a href="/wiki/Andreas_Kaplan" title="Andreas Kaplan">Kaplan</a> and Haenlein stress that AI systems are only as smart as the data used to train them, since they are in essence nothing more than fancy curve-fitting machines; using AI to support a court ruling can be highly problematic if past rulings show bias toward certain groups, since those biases become formalized and ingrained, making them even more difficult to spot and fight against (a sketch at the end of this section illustrates the mechanism).<sup id="cite_ref-101" class="reference"><a href="#cite_note-101"><span class="cite-bracket">&#91;</span>101<span class="cite-bracket">&#93;</span></a></sup> </p><p>Weizenbaum was also bothered that AI researchers (and some philosophers) were willing to view the human mind as nothing more than a computer program (a position now known as <a href="/wiki/Computationalism" class="mw-redirect" title="Computationalism">computationalism</a>). To Weizenbaum, these points suggest that AI research devalues human life.<sup id="cite_ref-Weizenbaum&#39;s_critique_99-1" class="reference"><a href="#cite_note-Weizenbaum&#39;s_critique-99"><span class="cite-bracket">&#91;</span>99<span class="cite-bracket">&#93;</span></a></sup> </p><p>AI founder <a href="/wiki/John_McCarthy_(computer_scientist)" title="John McCarthy (computer scientist)">John McCarthy</a> objects to the moralizing tone of Weizenbaum's critique. "When moralizing is both vehement and vague, it invites authoritarian abuse," he writes. <a href="/wiki/Bill_Hibbard" title="Bill Hibbard">Bill Hibbard</a><sup id="cite_ref-hibbard_2014_102-0" class="reference"><a href="#cite_note-hibbard_2014-102"><span class="cite-bracket">&#91;</span>102<span class="cite-bracket">&#93;</span></a></sup> writes that "Human dignity requires that we strive to remove our ignorance of the nature of existence, and AI is necessary for that striving."
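</p><p>Kaplan and Haenlein's "curve fitting" point can be made concrete with a short sketch (synthetic data and hypothetical variables, assuming <a href="/wiki/Scikit-learn" title="Scikit-learn">scikit-learn</a>): a model fitted to historically skewed rulings reproduces the skew for otherwise identical cases.</p>
<pre>
# Sketch: curve fitting formalizes bias present in historical rulings.
# Synthetic data; assumes scikit-learn and NumPy are installed.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n = 5000
severity = rng.normal(size=n)           # a legitimate factor
group = rng.integers(0, 2, size=n)      # a protected attribute
# Past rulings: driven by severity, but also skewed against group 1.
ruling = (severity + 1.5 * group +
          rng.normal(scale=0.5, size=n) > 0).astype(int)

model = LogisticRegression().fit(np.column_stack([severity, group]), ruling)

# Two otherwise-identical cases now receive different risk scores:
for g in (0, 1):
    p = model.predict_proba([[0.0, g]])[0, 1]
    print(f"group {g}: predicted probability of adverse ruling = {p:.2f}")
</pre>
<p>In real systems the skew is rarely this explicit, but the mechanism is the same: patterns in historical data are fitted and then reapplied to new cases.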
</p> <div class="mw-heading mw-heading3"><h3 id="Liability_for_self-driving_cars">Liability for self-driving cars</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=20" title="Edit section: Liability for self-driving cars"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Self-driving_car_liability" title="Self-driving car liability">Self-driving car liability</a></div> <p>As the widespread use of <a href="/wiki/Self-driving_car" title="Self-driving car">autonomous cars</a> becomes increasingly imminent, new challenges raised by fully autonomous vehicles must be addressed.<sup id="cite_ref-103" class="reference"><a href="#cite_note-103"><span class="cite-bracket">&#91;</span>103<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-104" class="reference"><a href="#cite_note-104"><span class="cite-bracket">&#91;</span>104<span class="cite-bracket">&#93;</span></a></sup> There have been debates about the legal liability of the responsible party if these cars get into accidents.<sup id="cite_ref-105" class="reference"><a href="#cite_note-105"><span class="cite-bracket">&#91;</span>105<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-106" class="reference"><a href="#cite_note-106"><span class="cite-bracket">&#91;</span>106<span class="cite-bracket">&#93;</span></a></sup> In one report where a driverless car hit a pedestrian, the driver was inside the car but the controls were fully in the hand of computers. This led to a dilemma over who was at fault for the accident.<sup id="cite_ref-107" class="reference"><a href="#cite_note-107"><span class="cite-bracket">&#91;</span>107<span class="cite-bracket">&#93;</span></a></sup> </p><p>In another incident on March 18, 2018, <a href="/wiki/Elaine_Herzberg" class="mw-redirect" title="Elaine Herzberg">Elaine Herzberg</a> was struck and killed by a self-driving <a href="/wiki/Uber" title="Uber">Uber</a> in Arizona. In this case, the automated car was capable of detecting cars and certain obstacles in order to autonomously navigate the roadway, but it could not anticipate a pedestrian in the middle of the road. This raised the question of whether the driver, pedestrian, the car company, or the government should be held responsible for her death.<sup id="cite_ref-108" class="reference"><a href="#cite_note-108"><span class="cite-bracket">&#91;</span>108<span class="cite-bracket">&#93;</span></a></sup> </p><p>Currently, self-driving cars are considered semi-autonomous, requiring the driver to pay attention and be prepared to take control if necessary.<sup id="cite_ref-109" class="reference"><a href="#cite_note-109"><span class="cite-bracket">&#91;</span>109<span class="cite-bracket">&#93;</span></a></sup><sup class="noprint Inline-Template" style="white-space:nowrap;">&#91;<i><a href="/wiki/Wikipedia:Verifiability" title="Wikipedia:Verifiability"><span title="The material near this tag failed verification of its source citation(s). (November 2020)">failed verification</span></a></i>&#93;</sup> Thus, it falls on governments to regulate the driver who over-relies on autonomous features. as well educate them that these are just technologies that, while convenient, are not a complete substitute. 
Before autonomous cars become widely used, these issues need to be tackled through new policies.<sup id="cite_ref-110" class="reference"><a href="#cite_note-110"><span class="cite-bracket">&#91;</span>110<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-111" class="reference"><a href="#cite_note-111"><span class="cite-bracket">&#91;</span>111<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-112" class="reference"><a href="#cite_note-112"><span class="cite-bracket">&#91;</span>112<span class="cite-bracket">&#93;</span></a></sup> </p><p>Experts contend that autonomous vehicles ought to be able to distinguish between acceptable and harmful decisions, since they have the potential to inflict harm.<sup id="cite_ref-113" class="reference"><a href="#cite_note-113"><span class="cite-bracket">&#91;</span>113<span class="cite-bracket">&#93;</span></a></sup> The two main approaches proposed to enable smart machines to render moral decisions are the bottom-up approach, which suggests that machines should learn ethical decisions by observing human behavior without the need for formal rules or moral philosophies, and the top-down approach, which involves programming specific ethical principles into the machine's guidance system (a minimal sketch of the latter appears below). However, both strategies face significant challenges: the top-down technique is criticized for the difficulty of preserving certain moral convictions, while the bottom-up strategy is questioned for the risk of learning unethical behavior from human activities. </p> <div class="mw-heading mw-heading3"><h3 id="Weaponization">Weaponization</h3></div> <div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Lethal_autonomous_weapon" title="Lethal autonomous weapon">Lethal autonomous weapon</a></div> <p>Some experts and academics have questioned the use of robots for military combat, especially when such robots are given some degree of autonomous functions.<sup id="cite_ref-114" class="reference"><a href="#cite_note-114"><span class="cite-bracket">&#91;</span>114<span class="cite-bracket">&#93;</span></a></sup> The US Navy has funded a report which indicates that as military robots become more complex, there should be greater attention to implications of their ability to make autonomous decisions.<sup id="cite_ref-115" class="reference"><a href="#cite_note-115"><span class="cite-bracket">&#91;</span>115<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-engadget.com_116-0" class="reference"><a href="#cite_note-engadget.com-116"><span class="cite-bracket">&#91;</span>116<span class="cite-bracket">&#93;</span></a></sup> The President of the <a href="/wiki/Association_for_the_Advancement_of_Artificial_Intelligence" title="Association for the Advancement of Artificial Intelligence">Association for the Advancement of Artificial Intelligence</a> has commissioned a study to look at this issue.<sup id="cite_ref-117" class="reference"><a href="#cite_note-117"><span class="cite-bracket">&#91;</span>117<span class="cite-bracket">&#93;</span></a></sup> They point to programs like the Language Acquisition Device, which can emulate human interaction.
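</p><p>The "top-down" constraint discussed under self-driving car liability above can be sketched in a few lines of code (all rules, action names and numbers here are hypothetical): hard-coded principles veto whatever actions the learning components propose.</p>
<pre>
# Sketch of a top-down ethical constraint layer: hard-coded rules
# veto candidate actions proposed by the learning components.
# All rules, action names and numbers are hypothetical.
from dataclasses import dataclass

@dataclass
class Action:
    name: str
    expected_harm: float     # estimated harm to humans, 0.0-1.0
    expected_benefit: float  # estimated benefit, 0.0-1.0

RULES = [
    lambda a: a.expected_harm &lt; 0.1,                 # forbid serious harm
    lambda a: a.expected_benefit > a.expected_harm,  # benefit must outweigh harm
]

def permitted(action: Action) -> bool:
    """Allow an action only if every hard-coded rule approves it."""
    return all(rule(action) for rule in RULES)

for a in [Action("swerve_onto_sidewalk", 0.8, 0.9),
          Action("brake_hard", 0.05, 0.6)]:
    print(a.name, "->", "permitted" if permitted(a) else "vetoed")
</pre>
<p>The difficulty critics point to lies precisely in writing such rules: numeric thresholds rarely capture moral convictions, and a planner can learn to game whatever metric the rules inspect.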
</p><p>On October 31, 2019, the United States Department of Defense's Defense Innovation Board published the draft of a report recommending principles for the ethical use of artificial intelligence by the Department of Defense, principles that would ensure a human operator is always able to look into the '<a href="/wiki/Black_box" title="Black box">black box</a>' and understand the kill-chain process. However, a major concern is how the report will be implemented.<sup id="cite_ref-118" class="reference"><a href="#cite_note-118"><span class="cite-bracket">&#91;</span>118<span class="cite-bracket">&#93;</span></a></sup> Some researchers state that <a href="/wiki/Autonomous_robot" title="Autonomous robot">autonomous robots</a> might be more humane, as they could make decisions more effectively.<sup id="cite_ref-120" class="reference"><a href="#cite_note-120"><span class="cite-bracket">&#91;</span>120<span class="cite-bracket">&#93;</span></a></sup> In 2024, the <a href="/wiki/DARPA" title="DARPA">Defense Advanced Research Projects Agency</a> funded a program, <i>Autonomy Standards and Ideals with Military Operational Values</i> (ASIMOV), to develop metrics that testing communities can use to evaluate the ethical implications of autonomous weapon systems.<sup id="cite_ref-121" class="reference"><a href="#cite_note-121"><span class="cite-bracket">&#91;</span>121<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-122" class="reference"><a href="#cite_note-122"><span class="cite-bracket">&#91;</span>122<span class="cite-bracket">&#93;</span></a></sup> </p><p>Research has studied how to build autonomous systems that learn under assigned moral responsibilities. "The results may be used when designing future military robots, to control unwanted tendencies to assign responsibility to the robots."<sup id="cite_ref-123" class="reference"><a href="#cite_note-123"><span class="cite-bracket">&#91;</span>123<span class="cite-bracket">&#93;</span></a></sup> From a <a href="/wiki/Consequentialism" title="Consequentialism">consequentialist</a> view, there is a chance that robots will develop the ability to make their own logical decisions on whom to kill, which is why there should be a set <a href="/wiki/Morality" title="Morality">moral</a> framework that the AI cannot override.<sup id="cite_ref-124" class="reference"><a href="#cite_note-124"><span class="cite-bracket">&#91;</span>124<span class="cite-bracket">&#93;</span></a></sup> </p><p>There has been recent outcry over the engineering of artificial intelligence weapons, including ideas of a <a href="/wiki/AI_takeover" title="AI takeover">robot takeover of mankind</a>. AI weapons present a type of danger different from that of human-controlled weapons. Many governments have begun to fund programs to develop AI weaponry.
The United States Navy recently announced plans to develop <a href="/wiki/Unmanned_combat_aerial_vehicle" title="Unmanned combat aerial vehicle">autonomous drone weapons</a>, paralleling similar announcements by Russia and South Korea.<sup id="cite_ref-125" class="reference"><a href="#cite_note-125"><span class="cite-bracket">&#91;</span>125<span class="cite-bracket">&#93;</span></a></sup> Due to the potential of AI weapons becoming more dangerous than human-operated weapons, <a href="/wiki/Stephen_Hawking" title="Stephen Hawking">Stephen Hawking</a> and <a href="/wiki/Max_Tegmark" title="Max Tegmark">Max Tegmark</a> signed a "Future of Life" petition<sup id="cite_ref-126" class="reference"><a href="#cite_note-126"><span class="cite-bracket">&#91;</span>126<span class="cite-bracket">&#93;</span></a></sup> to ban AI weapons. The message posted by Hawking and Tegmark states that AI weapons pose an immediate danger and that action is required to avoid catastrophic disasters in the near future.<sup id="cite_ref-theatlantic.com_127-0" class="reference"><a href="#cite_note-theatlantic.com-127"><span class="cite-bracket">&#91;</span>127<span class="cite-bracket">&#93;</span></a></sup> </p><p>"If any major military power pushes ahead with the AI weapon development, a global <a href="/wiki/Arms_race" title="Arms race">arms race</a> is virtually inevitable, and the endpoint of this technological trajectory is obvious: autonomous weapons will become the <a href="/wiki/AK-47" title="AK-47">Kalashnikovs</a> of tomorrow", says the petition, which includes <a href="/wiki/Skype" title="Skype">Skype</a> co-founder <a href="/wiki/Jaan_Tallinn" title="Jaan Tallinn">Jaan Tallinn</a> and MIT professor of linguistics <a href="/wiki/Noam_Chomsky" title="Noam Chomsky">Noam Chomsky</a> as additional supporters against AI weaponry.<sup id="cite_ref-128" class="reference"><a href="#cite_note-128"><span class="cite-bracket">&#91;</span>128<span class="cite-bracket">&#93;</span></a></sup> </p><p>Physicist and Astronomer Royal <a href="/wiki/Sir_Martin_Rees" class="mw-redirect" title="Sir Martin Rees">Sir Martin Rees</a> has warned of catastrophic instances like "dumb robots going rogue or a network that develops a mind of its own." <a href="/wiki/Huw_Price" title="Huw Price">Huw Price</a>, a colleague of Rees at Cambridge, has voiced a similar warning that humans might not survive when intelligence "escapes the constraints of biology".
These two professors created the <a href="/wiki/Centre_for_the_Study_of_Existential_Risk" title="Centre for the Study of Existential Risk">Centre for the Study of Existential Risk</a> at Cambridge University in the hope of avoiding this threat to human existence.<sup id="cite_ref-theatlantic.com_127-1" class="reference"><a href="#cite_note-theatlantic.com-127"><span class="cite-bracket">&#91;</span>127<span class="cite-bracket">&#93;</span></a></sup> </p><p>Regarding the potential for smarter-than-human systems to be employed militarily, the <a href="/wiki/Open_Philanthropy_Project" class="mw-redirect" title="Open Philanthropy Project">Open Philanthropy Project</a> writes that these scenarios "seem potentially as important as the risks related to loss of control", but research investigating AI's long-run social impact has spent relatively little time on this concern: "this class of scenarios has not been a major focus for the organizations that have been most active in this space, such as the <a href="/wiki/Machine_Intelligence_Research_Institute" title="Machine Intelligence Research Institute">Machine Intelligence Research Institute</a> (MIRI) and the <a href="/wiki/Future_of_Humanity_Institute" title="Future of Humanity Institute">Future of Humanity Institute</a> (FHI), and there seems to have been less analysis and debate regarding them".<sup id="cite_ref-129" class="reference"><a href="#cite_note-129"><span class="cite-bracket">&#91;</span>129<span class="cite-bracket">&#93;</span></a></sup> </p><p>Academic Gao Qiqi writes that military use of AI risks escalating military competition between countries and that the impact of AI in military matters will not be limited to one country but will have spillover effects.<sup id="cite_ref-:023_130-0" class="reference"><a href="#cite_note-:023-130"><span class="cite-bracket">&#91;</span>130<span class="cite-bracket">&#93;</span></a></sup><sup class="reference nowrap"><span title="Page: 91">&#58;&#8202;91&#8202;</span></sup> Gao cites the example of U.S.
military use of AI, which he contends has been used as a scapegoat to evade accountability for decision-making.<sup id="cite_ref-:023_130-1" class="reference"><a href="#cite_note-:023-130"><span class="cite-bracket">&#91;</span>130<span class="cite-bracket">&#93;</span></a></sup><sup class="reference nowrap"><span title="Page: 91">&#58;&#8202;91&#8202;</span></sup> </p><p>A <a href="/wiki/Summit_on_Responsible_Artificial_Intelligence_in_the_Military_Domain" title="Summit on Responsible Artificial Intelligence in the Military Domain">summit</a> was held in 2023 in The Hague on the issue of using AI responsibly in the military domain.<sup id="cite_ref-reg_131-0" class="reference"><a href="#cite_note-reg-131"><span class="cite-bracket">&#91;</span>131<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Singularity">Singularity</h3></div> <div role="note" class="hatnote navigation-not-searchable">Further information: <a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk from artificial general intelligence</a>, <a href="/wiki/Superintelligence" title="Superintelligence">Superintelligence</a>, and <a href="/wiki/Technological_singularity" title="Technological singularity">Technological singularity</a></div> <p><a href="/wiki/Vernor_Vinge" title="Vernor Vinge">Vernor Vinge</a>, among numerous others, has suggested that a moment may come when some, if not all, computers are smarter than humans. The onset of this event is commonly referred to as "<a href="/wiki/Technological_singularity" title="Technological singularity">the Singularity</a>"<sup id="cite_ref-NYT-2009_132-0" class="reference"><a href="#cite_note-NYT-2009-132"><span class="cite-bracket">&#91;</span>132<span class="cite-bracket">&#93;</span></a></sup> and is the central point of discussion in the philosophy of <a href="/wiki/Singularitarianism" title="Singularitarianism">Singularitarianism</a>. While opinions vary as to the ultimate fate of humanity in the wake of the Singularity, efforts to mitigate the potential existential risks brought about by artificial intelligence have become a significant topic of interest in recent years among computer scientists, philosophers, and the public at large.
</p><p>Many researchers have argued that, through an <a href="/wiki/Intelligence_explosion" class="mw-redirect" title="Intelligence explosion">intelligence explosion</a>, a self-improving AI could become so powerful that humans would not be able to stop it from achieving its goals.<sup id="cite_ref-Muehlhauser,_Luke_2012_133-0" class="reference"><a href="#cite_note-Muehlhauser,_Luke_2012-133"><span class="cite-bracket">&#91;</span>133<span class="cite-bracket">&#93;</span></a></sup> In his paper "Ethical Issues in Advanced Artificial Intelligence" and subsequent book <i><a href="/wiki/Superintelligence:_Paths,_Dangers,_Strategies" title="Superintelligence: Paths, Dangers, Strategies">Superintelligence: Paths, Dangers, Strategies</a></i>, philosopher <a href="/wiki/Nick_Bostrom" title="Nick Bostrom">Nick Bostrom</a> argues that artificial intelligence has the capability to bring about human extinction. He claims that an <a href="/wiki/Artificial_superintelligence" class="mw-redirect" title="Artificial superintelligence">artificial superintelligence</a> would be capable of independent initiative and of making its own plans, and may therefore be more appropriately thought of as an autonomous agent. Since artificial intellects need not share our human motivational tendencies, it would be up to the designers of the superintelligence to specify its original motivations. Because a superintelligent AI would be able to bring about almost any possible outcome and to thwart any attempt to prevent the implementation of its goals, many uncontrolled <a href="/wiki/Unintended_consequences" title="Unintended consequences">unintended consequences</a> could arise. It could kill off all other agents, persuade them to change their behavior, or block their attempts at interference.<sup id="cite_ref-Bostrom,_Nick_2003_134-0" class="reference"><a href="#cite_note-Bostrom,_Nick_2003-134"><span class="cite-bracket">&#91;</span>134<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-135" class="reference"><a href="#cite_note-135"><span class="cite-bracket">&#91;</span>135<span class="cite-bracket">&#93;</span></a></sup> </p><p>However, Bostrom contended that superintelligence also has the potential to solve many difficult problems such as disease, poverty, and environmental destruction, and could help <a href="/wiki/Human_enhancement" title="Human enhancement">humans enhance themselves</a>.<sup id="cite_ref-136" class="reference"><a href="#cite_note-136"><span class="cite-bracket">&#91;</span>136<span class="cite-bracket">&#93;</span></a></sup> </p><p>Unless moral philosophy provides us with a flawless ethical theory, an AI's utility function could allow for many potentially harmful scenarios that conform with a given ethical framework but not "common sense". According to <a href="/wiki/Eliezer_Yudkowsky" title="Eliezer Yudkowsky">Eliezer Yudkowsky</a>, there is little reason to suppose that an artificially designed mind would have such an adaptation.<sup id="cite_ref-137" class="reference"><a href="#cite_note-137"><span class="cite-bracket">&#91;</span>137<span class="cite-bracket">&#93;</span></a></sup> AI researchers such as <a href="/wiki/Stuart_J._Russell" title="Stuart J. Russell">Stuart J. 
Russell</a>,<sup id="cite_ref-138" class="reference"><a href="#cite_note-138"><span class="cite-bracket">&#91;</span>138<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Bill_Hibbard" title="Bill Hibbard">Bill Hibbard</a>,<sup id="cite_ref-hibbard_2014_102-1" class="reference"><a href="#cite_note-hibbard_2014-102"><span class="cite-bracket">&#91;</span>102<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Roman_Yampolskiy" title="Roman Yampolskiy">Roman Yampolskiy</a>,<sup id="cite_ref-139" class="reference"><a href="#cite_note-139"><span class="cite-bracket">&#91;</span>139<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Shannon_Vallor" title="Shannon Vallor">Shannon Vallor</a>,<sup id="cite_ref-140" class="reference"><a href="#cite_note-140"><span class="cite-bracket">&#91;</span>140<span class="cite-bracket">&#93;</span></a></sup> <a href="/w/index.php?title=Steven_Umbrello&amp;action=edit&amp;redlink=1" class="new" title="Steven Umbrello (page does not exist)">Steven Umbrello</a><sup id="cite_ref-141" class="reference"><a href="#cite_note-141"><span class="cite-bracket">&#91;</span>141<span class="cite-bracket">&#93;</span></a></sup> and <a href="/wiki/Luciano_Floridi" title="Luciano Floridi">Luciano Floridi</a><sup id="cite_ref-142" class="reference"><a href="#cite_note-142"><span class="cite-bracket">&#91;</span>142<span class="cite-bracket">&#93;</span></a></sup> have proposed design strategies for developing beneficial machines. </p> <div class="mw-heading mw-heading3"><h3 id="Solutions_and_approaches">Solutions and approaches</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=23" title="Edit section: Solutions and approaches"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>To address ethical challenges in artificial intelligence, developers have introduced various systems designed to ensure responsible AI behavior. Examples include <a href="/wiki/Nvidia" title="Nvidia">Nvidia</a>'s <sup id="cite_ref-143" class="reference"><a href="#cite_note-143"><span class="cite-bracket">&#91;</span>143<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Llama_(language_model)" title="Llama (language model)">Llama</a> Guard, which focuses on improving the <a href="/wiki/AI_safety" title="AI safety">safety</a> and <a href="/wiki/AI_alignment" title="AI alignment">alignment</a> of large AI models, <sup id="cite_ref-144" class="reference"><a href="#cite_note-144"><span class="cite-bracket">&#91;</span>144<span class="cite-bracket">&#93;</span></a></sup> and <a href="/wiki/Preamble_(company)" title="Preamble (company)">Preamble</a>'s customizable guardrail platform.<sup id="cite_ref-:0_145-0" class="reference"><a href="#cite_note-:0-145"><span class="cite-bracket">&#91;</span>145<span class="cite-bracket">&#93;</span></a></sup> These systems aim to address issues such as algorithmic bias, misuse, and vulnerabilities, including <a href="/wiki/Prompt_injection" title="Prompt injection">prompt injection</a> attacks, by embedding ethical guidelines into the functionality of AI models. </p><p>Prompt injection, a technique by which malicious inputs can cause AI systems to produce unintended or harmful outputs, has been a focus of these developments. 
Some approaches use customizable policies and rules to analyze both inputs and outputs, ensuring that potentially problematic interactions are filtered or mitigated.<sup id="cite_ref-:0_145-1" class="reference"><a href="#cite_note-:0-145"><span class="cite-bracket">&#91;</span>145<span class="cite-bracket">&#93;</span></a></sup> Other tools focus on applying structured constraints to inputs, restricting outputs to predefined parameters,<sup id="cite_ref-146" class="reference"><a href="#cite_note-146"><span class="cite-bracket">&#91;</span>146<span class="cite-bracket">&#93;</span></a></sup> or leveraging real-time monitoring mechanisms to identify and address vulnerabilities.<sup id="cite_ref-147" class="reference"><a href="#cite_note-147"><span class="cite-bracket">&#91;</span>147<span class="cite-bracket">&#93;</span></a></sup> These efforts reflect a broader trend toward ensuring that artificial intelligence systems are designed with safety and ethical considerations at the forefront, particularly as their use becomes increasingly widespread in critical applications.<sup id="cite_ref-148" class="reference"><a href="#cite_note-148"><span class="cite-bracket">&#91;</span>148<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Institutions_in_AI_policy_&amp;_ethics"><span id="Institutions_in_AI_policy_.26_ethics"></span>Institutions in AI policy &amp; ethics</h2></div> <p>There are many organizations concerned with AI ethics and policy, public and governmental as well as corporate and societal. </p><p><a href="/wiki/Amazon.com,_Inc." class="mw-redirect" title="Amazon.com, Inc.">Amazon</a>, <a href="/wiki/Google" title="Google">Google</a>, <a href="/wiki/Facebook" title="Facebook">Facebook</a>, <a href="/wiki/IBM" title="IBM">IBM</a>, and <a href="/wiki/Microsoft" title="Microsoft">Microsoft</a> have established a <a href="/wiki/Nonprofit_organization" title="Nonprofit organization">non-profit</a>, The Partnership on AI to Benefit People and Society, to formulate best practices on artificial intelligence technologies, to advance the public's understanding, and to serve as a platform for discussion about artificial intelligence. Apple joined in January 2017. The corporate members will make financial and research contributions to the group, while engaging with the scientific community to bring academics onto the board.<sup id="cite_ref-149" class="reference"><a href="#cite_note-149"><span class="cite-bracket">&#91;</span>149<span class="cite-bracket">&#93;</span></a></sup> </p><p>The <a href="/wiki/IEEE" class="mw-redirect" title="IEEE">IEEE</a> put together a Global Initiative on Ethics of Autonomous and Intelligent Systems, which has been creating and revising guidelines with the help of public input and accepts as members many professionals from within and outside its organization. The IEEE's <a rel="nofollow" class="external text" href="https://standards.ieee.org/industry-connections/activities/ieee-global-initiative/">Ethics of Autonomous Systems</a> initiative aims to address ethical dilemmas related to decision-making and the impact on society while developing guidelines for the development and use of autonomous systems.
In domains like artificial intelligence and robotics in particular, the Foundation for Responsible Robotics is dedicated to promoting moral behavior as well as responsible robot design and use, ensuring that robots adhere to moral principles and are congruent with human values. </p><p>Traditionally, societies have used <a href="/wiki/Government" title="Government">government</a> to ensure that ethics are observed, through legislation and policing. There are now many efforts by national governments, as well as by transnational governmental and <a href="/wiki/NGO" class="mw-redirect" title="NGO">non-governmental organizations</a>, to ensure AI is ethically applied. </p><p>AI ethics work is structured by personal values and professional commitments, and involves constructing contextual meaning through data and algorithms. Therefore, AI ethics work needs to be incentivized.<sup id="cite_ref-150" class="reference"><a href="#cite_note-150"><span class="cite-bracket">&#91;</span>150<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Intergovernmental_initiatives">Intergovernmental initiatives</h3></div> <ul><li>The <a href="/wiki/European_Commission" title="European Commission">European Commission</a> has a High-Level Expert Group on Artificial Intelligence. On 8 April 2019, this group published its "Ethics Guidelines for <a href="/wiki/Trustworthy_AI" title="Trustworthy AI">Trustworthy Artificial Intelligence</a>".<sup id="cite_ref-151" class="reference"><a href="#cite_note-151"><span class="cite-bracket">&#91;</span>151<span class="cite-bracket">&#93;</span></a></sup> The European Commission also has a Robotics and Artificial Intelligence Innovation and Excellence unit, which published a white paper on excellence and trust in artificial intelligence innovation on 19 February 2020.<sup id="cite_ref-152" class="reference"><a href="#cite_note-152"><span class="cite-bracket">&#91;</span>152<span class="cite-bracket">&#93;</span></a></sup> The European Commission also proposed the <a href="/wiki/Artificial_Intelligence_Act" title="Artificial Intelligence Act">Artificial Intelligence Act</a>.<sup id="cite_ref-Financial_Times-2021_82-1" class="reference"><a href="#cite_note-Financial_Times-2021-82"><span class="cite-bracket">&#91;</span>82<span class="cite-bracket">&#93;</span></a></sup></li> <li>The <a href="/wiki/OECD" title="OECD">OECD</a> established an OECD AI Policy Observatory.<sup id="cite_ref-153" class="reference"><a href="#cite_note-153"><span class="cite-bracket">&#91;</span>153<span class="cite-bracket">&#93;</span></a></sup></li> <li>In 2021, <a href="/wiki/UNESCO" title="UNESCO">UNESCO</a> adopted the Recommendation on the Ethics of Artificial Intelligence,<sup id="cite_ref-154" class="reference"><a href="#cite_note-154"><span class="cite-bracket">&#91;</span>154<span class="cite-bracket">&#93;</span></a></sup> the first global standard on the ethics of AI.<sup id="cite_ref-155" class="reference"><a href="#cite_note-155"><span class="cite-bracket">&#91;</span>155<span class="cite-bracket">&#93;</span></a></sup></li></ul> <div class="mw-heading mw-heading3"><h3 id="Governmental_initiatives">Governmental initiatives</h3></div>
class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=26" title="Edit section: Governmental initiatives"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li>In the <a href="/wiki/United_States" title="United States">United States</a> the <a href="/wiki/Obama" class="mw-redirect" title="Obama">Obama</a> administration put together a Roadmap for AI Policy.<sup id="cite_ref-156" class="reference"><a href="#cite_note-156"><span class="cite-bracket">&#91;</span>156<span class="cite-bracket">&#93;</span></a></sup> The Obama Administration released two prominent <a href="/wiki/White_papers" class="mw-redirect" title="White papers">white papers</a> on the future and impact of AI. In 2019 the White House through an executive memo known as the "American AI Initiative" instructed NIST the (National Institute of Standards and Technology) to begin work on Federal Engagement of AI Standards (February 2019).<sup id="cite_ref-157" class="reference"><a href="#cite_note-157"><span class="cite-bracket">&#91;</span>157<span class="cite-bracket">&#93;</span></a></sup></li> <li>In January 2020, in the United States, the <a href="/wiki/First_presidency_of_Donald_Trump" title="First presidency of Donald Trump">Trump Administration</a> released a draft executive order issued by the Office of Management and Budget (OMB) on "Guidance for Regulation of Artificial Intelligence Applications" ("OMB AI Memorandum"). The order emphasizes the need to invest in AI applications, boost public trust in AI, reduce barriers for usage of AI, and keep American AI technology competitive in a global market. There is a nod to the need for privacy concerns, but no further detail on enforcement. The advances of American AI technology seems to be the focus and priority. Additionally, federal entities are even encouraged to use the order to circumnavigate any state laws and regulations that a market might see as too onerous to fulfill.<sup id="cite_ref-158" class="reference"><a href="#cite_note-158"><span class="cite-bracket">&#91;</span>158<span class="cite-bracket">&#93;</span></a></sup></li> <li>The <a href="/wiki/Computing_Community_Consortium" title="Computing Community Consortium">Computing Community Consortium (CCC)</a> weighed in with a 100-plus page draft report<sup id="cite_ref-159" class="reference"><a href="#cite_note-159"><span class="cite-bracket">&#91;</span>159<span class="cite-bracket">&#93;</span></a></sup> – <i>A 20-Year Community Roadmap for Artificial Intelligence Research in the US</i><sup id="cite_ref-160" class="reference"><a href="#cite_note-160"><span class="cite-bracket">&#91;</span>160<span class="cite-bracket">&#93;</span></a></sup></li> <li>The <a href="/wiki/Center_for_Security_and_Emerging_Technology" title="Center for Security and Emerging Technology">Center for Security and Emerging Technology</a> advises US policymakers on the security implications of emerging technologies such as AI.</li> <li>In Russia, the first-ever Russian "Codex of ethics of artificial intelligence" for business was signed in 2021. 
It was driven by the Analytical Center for the Government of the Russian Federation together with major commercial and academic institutions such as <a href="/wiki/Sberbank">Sberbank</a>, <a href="/wiki/Yandex">Yandex</a>, <a href="/wiki/Rosatom">Rosatom</a>, <a href="/wiki/Higher_School_of_Economics">Higher School of Economics</a>, <a href="/wiki/Moscow_Institute_of_Physics_and_Technology">Moscow Institute of Physics and Technology</a>, <a href="/wiki/ITMO_University">ITMO University</a>, <a href="/wiki/Nanosemantics">Nanosemantics</a>, <a href="/wiki/Rostelecom">Rostelecom</a>, <a href="/wiki/CIAN">CIAN</a> and others.<sup class="reference"><a href="#cite_note-161">[161]</a></sup></li></ul> <h3 id="Academic_initiatives">Academic initiatives</h3> <ul><li>Three research institutes at the <a href="/wiki/University_of_Oxford">University of Oxford</a> are centrally focused on AI ethics. The <a href="/wiki/Future_of_Humanity_Institute">Future of Humanity Institute</a> focuses on both AI safety<sup class="reference"><a href="#cite_note-162">[162]</a></sup> and the governance of AI.<sup class="reference"><a href="#cite_note-163">[163]</a></sup> The Institute for Ethics in AI, directed by <a href="/wiki/John_Tasioulas">John Tasioulas</a>, aims, among other goals, to promote AI ethics as a field in its own right alongside related applied-ethics fields.
The <a href="/wiki/Oxford_Internet_Institute" title="Oxford Internet Institute">Oxford Internet Institute</a>, directed by <a href="/wiki/Luciano_Floridi" title="Luciano Floridi">Luciano Floridi</a>, focuses on the ethics of near-term AI technologies and ICTs.<sup id="cite_ref-164" class="reference"><a href="#cite_note-164"><span class="cite-bracket">&#91;</span>164<span class="cite-bracket">&#93;</span></a></sup></li> <li>The Centre for Digital Governance at the <a href="/wiki/Hertie_School" title="Hertie School">Hertie School</a> in Berlin was co-founded by <a href="/wiki/Joanna_Bryson" title="Joanna Bryson">Joanna Bryson</a> to research questions of ethics and technology.<sup id="cite_ref-165" class="reference"><a href="#cite_note-165"><span class="cite-bracket">&#91;</span>165<span class="cite-bracket">&#93;</span></a></sup></li> <li>The <a href="/wiki/AI_Now_Institute" title="AI Now Institute">AI Now Institute</a> at <a href="/wiki/NYU" class="mw-redirect" title="NYU">NYU</a> is a research institute studying the social implications of artificial intelligence. Its interdisciplinary research focuses on the themes bias and inclusion, labour and automation, rights and liberties, and safety and civil infrastructure.<sup id="cite_ref-166" class="reference"><a href="#cite_note-166"><span class="cite-bracket">&#91;</span>166<span class="cite-bracket">&#93;</span></a></sup></li> <li>The <a href="/wiki/Institute_for_Ethics_and_Emerging_Technologies" title="Institute for Ethics and Emerging Technologies">Institute for Ethics and Emerging Technologies</a> (IEET) researches the effects of AI on unemployment,<sup id="cite_ref-167" class="reference"><a href="#cite_note-167"><span class="cite-bracket">&#91;</span>167<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-168" class="reference"><a href="#cite_note-168"><span class="cite-bracket">&#91;</span>168<span class="cite-bracket">&#93;</span></a></sup> and policy.</li> <li>The <a href="/wiki/Institute_for_Ethics_in_Artificial_Intelligence" class="mw-redirect" title="Institute for Ethics in Artificial Intelligence">Institute for Ethics in Artificial Intelligence</a> (IEAI) at the <a href="/wiki/Technical_University_of_Munich" title="Technical University of Munich">Technical University of Munich</a> directed by <a href="/wiki/Christoph_L%C3%BCtge" title="Christoph Lütge">Christoph Lütge</a> conducts research across various domains such as mobility, employment, healthcare and sustainability.<sup id="cite_ref-169" class="reference"><a href="#cite_note-169"><span class="cite-bracket">&#91;</span>169<span class="cite-bracket">&#93;</span></a></sup></li> <li><a href="/wiki/Barbara_J._Grosz" title="Barbara J. Grosz">Barbara J. Grosz</a>, the Higgins Professor of Natural Sciences at the <a href="/wiki/Harvard_John_A._Paulson_School_of_Engineering_and_Applied_Sciences" title="Harvard John A. Paulson School of Engineering and Applied Sciences">Harvard John A. 
Paulson School of Engineering and Applied Sciences</a>, initiated Embedded EthiCS in <a href="/wiki/Harvard_University">Harvard</a>'s computer science curriculum to develop a future generation of computer scientists with a worldview that takes into account the social impact of their work.<sup class="reference"><a href="#cite_note-170">[170]</a></sup></li></ul> <h3 id="Private_organizations">Private organizations</h3> <ul><li><a href="/wiki/Algorithmic_Justice_League">Algorithmic Justice League</a><sup class="reference"><a href="#cite_note-171">[171]</a></sup></li> <li><a href="/wiki/Black_in_AI">Black in AI</a><sup class="reference"><a href="#cite_note-172">[172]</a></sup></li> <li><a href="/wiki/Data_for_Black_Lives">Data for Black Lives</a><sup class="reference"><a href="#cite_note-173">[173]</a></sup></li></ul> <h2 id="History">History</h2> <p>Historically, the investigation of the moral and ethical implications of "thinking machines" goes back at least to the <a href="/wiki/Age_of_Enlightenment">Enlightenment</a>: <a href="/wiki/Gottfried_Wilhelm_Leibniz">Leibniz</a> already posed the question of whether we might attribute intelligence to a mechanism that behaves as if it were a sentient being,<sup class="reference"><a href="#cite_note-174">[174]</a></sup> as did <a href="/wiki/Ren%C3%A9_Descartes">Descartes</a>, who described what could be considered an early version of the <a href="/wiki/Turing_test">Turing test</a>.<sup class="reference"><a href="#cite_note-175">[175]</a></sup> </p><p>The <a href="/wiki/Romanticism">Romantic</a> period several times envisioned artificial creatures that escape the control of their creator with dire consequences, most famously in <a href="/wiki/Mary_Shelley">Mary Shelley</a>'s <i><a href="/wiki/Frankenstein">Frankenstein</a></i>. The widespread preoccupation with industrialization and mechanization in the 19th and early 20th centuries, however, brought the ethical implications of unchecked technical development to the forefront of fiction:
title="R.U.R."><i>R.U.R – Rossum's Universal Robots</i></a>, <a href="/wiki/Karel_%C4%8Capek" title="Karel Čapek">Karel Čapek</a>'s play of sentient robots endowed with emotions used as slave labor is not only credited with the invention of the term 'robot' (derived from the Czech word for forced labor, <i>robota</i>)<sup id="cite_ref-176" class="reference"><a href="#cite_note-176"><span class="cite-bracket">&#91;</span>176<span class="cite-bracket">&#93;</span></a></sup> but was also an international success after it premiered in 1921. <a href="/wiki/George_Bernard_Shaw" title="George Bernard Shaw">George Bernard Shaw</a>'s play <i><a href="/wiki/Back_to_Methuselah" title="Back to Methuselah">Back to Methuselah</a></i>, published in 1921, questions at one point the validity of thinking machines that act like humans; <a href="/wiki/Fritz_Lang" title="Fritz Lang">Fritz Lang</a>'s 1927 film <i><a href="/wiki/Metropolis_(1927_film)" title="Metropolis (1927 film)">Metropolis</a></i> shows an <a href="/wiki/Android_(robot)" title="Android (robot)">android</a> leading the uprising of the exploited masses against the oppressive regime of a <a href="/wiki/Technocracy" title="Technocracy">technocratic</a> society. In the 1950s, <a href="/wiki/Isaac_Asimov" title="Isaac Asimov">Isaac Asimov</a> considered the issue of how to control machines in <i><a href="/wiki/I,_Robot" title="I, Robot">I, Robot</a></i>. At the insistence of his editor <a href="/wiki/John_W._Campbell_Jr." class="mw-redirect" title="John W. Campbell Jr.">John W. Campbell Jr.</a>, he proposed the <a href="/wiki/Three_Laws_of_Robotics" title="Three Laws of Robotics">Three Laws of Robotics</a> to govern artificially intelligent systems. Much of his work was then spent testing the boundaries of his three laws to see where they would break down, or where they would create paradoxical or unanticipated behavior.<sup id="cite_ref-177" class="reference"><a href="#cite_note-177"><span class="cite-bracket">&#91;</span>177<span class="cite-bracket">&#93;</span></a></sup> His work suggests that no set of fixed laws can sufficiently anticipate all possible circumstances.<sup id="cite_ref-Asimov2008_178-0" class="reference"><a href="#cite_note-Asimov2008-178"><span class="cite-bracket">&#91;</span>178<span class="cite-bracket">&#93;</span></a></sup> More recently, academics and many governments have challenged the idea that AI can itself be held accountable.<sup id="cite_ref-lacuna_179-0" class="reference"><a href="#cite_note-lacuna-179"><span class="cite-bracket">&#91;</span>179<span class="cite-bracket">&#93;</span></a></sup> A panel convened by the <a href="/wiki/United_Kingdom" title="United Kingdom">United Kingdom</a> in 2010 revised Asimov's laws to clarify that AI is the responsibility either of its manufacturers, or of its owner/operator.<sup id="cite_ref-principles_180-0" class="reference"><a href="#cite_note-principles-180"><span class="cite-bracket">&#91;</span>180<span class="cite-bracket">&#93;</span></a></sup> </p><p><a href="/wiki/Eliezer_Yudkowsky" title="Eliezer Yudkowsky">Eliezer Yudkowsky</a>, from the <a href="/wiki/Machine_Intelligence_Research_Institute" title="Machine Intelligence Research Institute">Machine Intelligence Research Institute</a> suggested in 2004 a need to study how to build a "<a href="/wiki/Friendly_AI" class="mw-redirect" title="Friendly AI">Friendly AI</a>", meaning that there should also be efforts to make AI intrinsically friendly and humane.<sup id="cite_ref-181" class="reference"><a 
href="#cite_note-181"><span class="cite-bracket">&#91;</span>181<span class="cite-bracket">&#93;</span></a></sup> </p><p>In 2009, academics and technical experts attended a conference organized by the <a href="/wiki/Association_for_the_Advancement_of_Artificial_Intelligence" title="Association for the Advancement of Artificial Intelligence">Association for the Advancement of Artificial Intelligence</a> to discuss the potential impact of robots and computers, and the impact of the hypothetical possibility that they could become self-sufficient and make their own decisions. They discussed the possibility and the extent to which computers and robots might be able to acquire any level of autonomy, and to what degree they could use such abilities to possibly pose any threat or hazard.<sup id="cite_ref-182" class="reference"><a href="#cite_note-182"><span class="cite-bracket">&#91;</span>182<span class="cite-bracket">&#93;</span></a></sup> They noted that some machines have acquired various forms of semi-autonomy, including being able to find power sources on their own and being able to independently choose targets to attack with weapons. They also noted that some computer viruses can evade elimination and have achieved "cockroach intelligence". They noted that self-awareness as depicted in science-fiction is probably unlikely, but that there were other potential hazards and pitfalls.<sup id="cite_ref-NYT-2009_132-1" class="reference"><a href="#cite_note-NYT-2009-132"><span class="cite-bracket">&#91;</span>132<span class="cite-bracket">&#93;</span></a></sup> </p><p>Also in 2009, during an experiment at the Laboratory of Intelligent Systems in the Ecole Polytechnique Fédérale of <a href="/wiki/Lausanne" title="Lausanne">Lausanne</a>, Switzerland, robots that were programmed to cooperate with each other (in searching out a beneficial resource and avoiding a poisonous one) eventually learned to lie to each other in an attempt to hoard the beneficial resource.<sup id="cite_ref-183" class="reference"><a href="#cite_note-183"><span class="cite-bracket">&#91;</span>183<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Role_and_impact_of_fiction">Role and impact of fiction</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=30" title="Edit section: Role and impact of fiction"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Artificial_intelligence_in_fiction" title="Artificial intelligence in fiction">Artificial intelligence in fiction</a></div> <p>The role of fiction with regards to AI ethics has been a complex one.<sup id="cite_ref-Bassett_184-0" class="reference"><a href="#cite_note-Bassett-184"><span class="cite-bracket">&#91;</span>184<span class="cite-bracket">&#93;</span></a></sup> One can distinguish three levels at which fiction has impacted the development of artificial intelligence and robotics: Historically, fiction has been prefiguring common tropes that have not only influenced goals and visions for AI, but also outlined ethical questions and common fears associated with it. 
During the second half of the twentieth century and the first decades of the twenty-first, popular culture, in particular movies, TV series and video games, has frequently echoed preoccupations and dystopian projections around ethical questions concerning AI and robotics. Recently, these themes have also been increasingly treated in literature beyond the realm of science fiction. And, as Carme Torras, research professor at the <i>Institut de Robòtica i Informàtica Industrial</i> (Institute of Robotics and Industrial Informatics) at the Technical University of Catalonia, notes,<sup class="reference"><a href="#cite_note-185">[185]</a></sup> science fiction is also increasingly used in higher education to teach technology-related ethical issues in technical degree programs. </p> <h3 id="TV_series">TV series</h3> <p>While ethical questions linked to AI have been featured in science fiction literature and <a href="/wiki/List_of_artificial_intelligence_films">feature films</a> for decades, the emergence of the TV series as a genre allowing for longer and more complex story lines and character development has led to some significant contributions dealing with the ethical implications of technology. The Swedish series <i><a href="/wiki/Real_Humans">Real Humans</a></i> (2012–2013) tackled the complex ethical and social consequences of integrating artificial sentient beings into society. The British dystopian science fiction anthology series <i><a href="/wiki/Black_Mirror">Black Mirror</a></i> (2013–2019) was particularly notable for experimenting with dystopian fictional developments linked to a wide variety of recent technological developments. Both the French series <i><a href="/wiki/Osmosis_(TV_series)">Osmosis</a></i> (2020) and the British series <i><a href="/wiki/The_One_(TV_series)">The One</a></i> deal with the question of what can happen if technology tries to find the ideal partner for a person. Several episodes of the Netflix series <i><a href="/wiki/Love,_Death_%26_Robots">Love, Death &amp; Robots</a></i> have imagined robots and humans living together.
The most representative of these, season 2, episode 1, shows how severe the consequences can be when humans rely too much on robots in their lives and those robots get out of control.<sup class="reference"><a href="#cite_note-186">[186]</a></sup> </p> <h3 id="Future_visions_in_fiction_and_games">Future visions in fiction and games</h3> <p>The movie <i><a href="/wiki/The_Thirteenth_Floor">The Thirteenth Floor</a></i> suggests a future where <a href="/wiki/Simulated_reality">simulated worlds</a> with sentient inhabitants are created by computer <a href="/wiki/Game_console">game consoles</a> for the purpose of entertainment. The movie <i><a href="/wiki/The_Matrix">The Matrix</a></i> suggests a future where the dominant species on planet Earth are sentient machines and humanity is treated with utmost <a href="/wiki/Speciesism">speciesism</a>. The short story "<a href="/wiki/The_Planck_Dive">The Planck Dive</a>" suggests a future where humanity has turned itself into software that can be duplicated and optimized, and in which the relevant distinction between types of software is between the sentient and the non-sentient. The same idea can be found in the <a href="/wiki/Emergency_Medical_Hologram">Emergency Medical Hologram</a> of <i><a href="/wiki/USS_Voyager_(NCC-74656)">Starship Voyager</a></i>, an apparently sentient copy of a reduced subset of the consciousness of its creator, <a href="/wiki/Lewis_Zimmerman">Dr. Zimmerman</a>, who, with the best of motives, created the system to give medical assistance in emergencies. The movies <i><a href="/wiki/Bicentennial_Man_(film)">Bicentennial Man</a></i> and <i><a href="/wiki/A.I._Artificial_Intelligence">A.I.</a></i> deal with the possibility of sentient robots that could love. <i><a href="/wiki/I,_Robot_(film)">I, Robot</a></i> explored some aspects of Asimov's three laws. All these scenarios try to foresee possibly unethical consequences of creating sentient computers.<sup class="reference"><a href="#cite_note-187">[187]</a></sup> </p><p>The ethics of artificial intelligence is one of several core themes in BioWare's <a href="/wiki/Mass_Effect">Mass Effect</a> series of games.<sup class="reference"><a href="#cite_note-188">[188]</a></sup> It explores the scenario of a civilization accidentally creating AI through a rapid increase in computational power via a global-scale <a href="/wiki/Neural_network">neural network</a>.
This event caused an ethical schism between those who felt that bestowing organic rights upon the newly sentient Geth was appropriate and those who continued to see them as disposable machinery and fought to destroy them. Beyond the initial conflict, the complexity of the relationship between the machines and their creators is another ongoing theme throughout the story. </p><p><i><a href="/wiki/Detroit:_Become_Human">Detroit: Become Human</a></i> is one of the best-known recent video games to address the ethics of artificial intelligence. Quantic Dream designed the game's chapters as interactive storylines to give players a more immersive experience. Players control three awakened androids who face various events and make choices intended to change how humans view androids; different choices lead to different endings. It is one of the few games that puts players in the android's perspective, allowing them to better consider the rights and interests of robots once a true artificial intelligence is created.<sup class="reference"><a href="#cite_note-189">[189]</a></sup> </p><p>Over time, debates have tended to focus less and less on <i>possibility</i> and more on <i>desirability</i>,<sup class="reference"><a href="#cite_note-190">[190]</a></sup> as emphasized in the <a href="/wiki/Hugo_de_Garis#The_Artilect_War">"Cosmist" and "Terran" debates</a> initiated by <a href="/wiki/Hugo_de_Garis">Hugo de Garis</a> and <a href="/wiki/Kevin_Warwick">Kevin Warwick</a>. A Cosmist, according to de Garis, seeks to build more intelligent successors to the human species.
</p><p>Experts at the University of Cambridge have argued that AI is portrayed in fiction and nonfiction overwhelmingly as racially White, in ways that distort perceptions of its risks and benefits.<sup class="reference"><a href="#cite_note-191">[191]</a></sup> </p> <h2 id="See_also">See also</h2> <ul><li><a href="/wiki/AI_takeover">AI takeover</a></li> <li><a href="/wiki/AI_washing">AI washing</a></li> <li><a href="/wiki/Artificial_consciousness">Artificial consciousness</a></li> <li><a href="/wiki/Artificial_general_intelligence">Artificial general intelligence</a> (AGI)</li> <li><a href="/wiki/Artificial_intelligence_and_elections">Artificial intelligence and elections</a> – use of AI in elections and political campaigning</li> <li><a href="/wiki/Computer_ethics">Computer ethics</a></li> <li><a href="/wiki/Dead_internet_theory">Dead internet theory</a></li> <li><a href="/wiki/Effective_altruism#Long-term_future_and_global_catastrophic_risks">Effective altruism, the long-term future and global catastrophic risks</a></li> <li><a href="/wiki/Ethics_of_uncertain_sentience">Ethics of uncertain sentience</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence">Existential risk from artificial general intelligence</a></li> <li><i><a href="/wiki/Human_Compatible">Human Compatible</a></i></li> <li><a href="/wiki/Metaverse_law">Metaverse law</a></li> <li><a href="/wiki/Personhood">Personhood</a></li> <li><a href="/wiki/Philosophy_of_artificial_intelligence">Philosophy of artificial intelligence</a></li> <li><a href="/wiki/Regulation_of_artificial_intelligence">Regulation of artificial intelligence</a></li> <li><a href="/wiki/Robotic_governance">Robotic governance</a></li> <li><a href="/wiki/Roko%27s_basilisk">Roko's basilisk</a></li> <li><i><a href="/wiki/Superintelligence:_Paths,_Dangers,_Strategies">Superintelligence: Paths, Dangers, Strategies</a></i></li> <li><a href="/wiki/Suffering_risks">Suffering risks</a></li></ul>
<h2 id="References">References</h2> <div class="reflist"><ol class="references">
<li id="cite_note-Muller-2020-1">^ <sup><i>a</i></sup> <sup><i>b</i></sup> Müller VC (30 April 2020). <a href="https://plato.stanford.edu/entries/ethics-ai/">"Ethics of Artificial Intelligence and Robotics"</a>. <i>Stanford Encyclopedia of Philosophy</i>. <a href="https://web.archive.org/web/20201010174108/https://plato.stanford.edu/entries/ethics-ai/">Archived</a> from the original on 10 October 2020.</li>
<li id="cite_note-2">^ Van Eyghen H (2024). <a href="https://link.springer.com/article/10.1007/s44163-024-00219-z">"AI Algorithms as (Un)virtuous Knowers"</a>. <i>Discover Artificial Intelligence</i>. <b>5</b> (2).</li>
<li id="cite_note-Andersonweb-3">^ Anderson. <a href="http://uhaweb.hartford.edu/anderson/MachineEthics.html">"Machine Ethics"</a>. <a href="https://web.archive.org/web/20110928233656/https://uhaweb.hartford.edu/anderson/MachineEthics.html">Archived</a> from the original on 28 September 2011. Retrieved 27 June 2011.</li>
<li id="cite_note-Anderson2011-4">^ Anderson M, Anderson SL, eds. (July 2011). <i>Machine Ethics</i>. Cambridge University Press. ISBN 978-0-521-11235-2.</li>
<li id="cite_note-Anderson2006-5">^ Anderson M, Anderson SL (July 2006). "Guest Editors' Introduction: Machine Ethics". <i>IEEE Intelligent Systems</i>. <b>21</b> (4): 10–11. doi:10.1109/mis.2006.70. S2CID 9570832.</li>
<li id="cite_note-Anderson2007-6">^ Anderson M, Anderson SL (15 December 2007). "Machine Ethics: Creating an Ethical Intelligent Agent". <i>AI Magazine</i>. <b>28</b> (4): 15. doi:10.1609/aimag.v28i4.2065. S2CID 17033332.</li>
<li id="cite_note-7">^ Boyles RJ (2017). <a href="https://philarchive.org/rec/BOYPSF">"Philosophical Signposts for Artificial Moral Agent Frameworks"</a>. <i>Suri</i>. <b>6</b> (2): 92–109.</li>
<li id="cite_note-Winfield-2019-8">^ <sup><i>a</i></sup> <sup><i>b</i></sup> Winfield AF, Michael K, Pitt J, Evers V (March 2019). <a href="https://doi.org/10.1109%2FJPROC.2019.2900622">"Machine Ethics: The Design and Governance of Ethical AI and Autonomous Systems [Scanning the Issue]"</a>. <i>Proceedings of the IEEE</i>. <b>107</b> (3): 509–517. doi:10.1109/JPROC.2019.2900622. ISSN 1558-2256. S2CID 77393713.</li>
<li id="cite_note-9">^ Al-Rodhan N (7 December 2015). <a href="https://www.foreignaffairs.com/articles/2015-08-12/moral-code">"The Moral Code"</a>. <a href="https://web.archive.org/web/20170305044025/https://www.foreignaffairs.com/articles/2015-08-12/moral-code">Archived</a> from the original on 5 March 2017. Retrieved 4 March 2017.</li>
<li id="cite_note-10">^ Sauer M (8 April 2022). <a href="https://www.cnbc.com/2022/04/08/elon-musk-humans-could-eventually-download-their-brains-into-robots.html">"Elon Musk says humans could eventually download their brains into robots — and Grimes thinks Jeff Bezos would do it"</a>. <i>CNBC</i>. <a href="https://web.archive.org/web/20240925013113/https://www.cnbc.com/2022/04/08/elon-musk-humans-could-eventually-download-their-brains-into-robots.html">Archived</a> from the original on 25 September 2024. Retrieved 7 April 2024.</li>
<li id="cite_note-11">^ Anadiotis G (4 April 2022). <a href="https://www.zdnet.com/article/massaging-ai-language-models-for-fun-profit-and-ethics/">"Massaging AI language models for fun, profit and ethics"</a>. <i>ZDNET</i>. <a href="https://web.archive.org/web/20240925013214/https://www.zdnet.com/article/massaging-ai-language-models-for-fun-profit-and-ethics/">Archived</a> from the original on 25 September 2024. Retrieved 7 April 2024.</li>
<li id="cite_note-Wallach2008-12">^ Wallach W, Allen C (November 2008). <i>Moral Machines: Teaching Robots Right from Wrong</i>. USA: Oxford University Press. ISBN 978-0-19-537404-9.</li>
<li id="cite_note-13">^ Bostrom N, Yudkowsky E (2011). <a href="http://www.nickbostrom.com/ethics/artificial-intelligence.pdf">"The Ethics of Artificial Intelligence"</a> (PDF). <i>Cambridge Handbook of Artificial Intelligence</i>. Cambridge Press. <a href="https://web.archive.org/web/20160304015020/http://www.nickbostrom.com/ethics/artificial-intelligence.pdf">Archived</a> (PDF) from the original on 4 March 2016. Retrieved 22 June 2011.</li>
<li id="cite_note-SantosLang2002-14">^ Santos-Lang C (2002). <a href="http://santoslang.wordpress.com/article/ethics-for-artificial-intelligences-3iue30fi4gfq9-1">"Ethics for Artificial Intelligences"</a>. <a href="https://web.archive.org/web/20141225093359/http://santoslang.wordpress.com/article/ethics-for-artificial-intelligences-3iue30fi4gfq9-1/">Archived</a> from the original on 25 December 2014. Retrieved 4 January 2015.</li>
<li id="cite_note-Veruggio2002-15">^ Veruggio G (2011). "The Roboethics Roadmap". <i>EURON Roboethics Atelier</i>. Scuola di Robotica: 2. CiteSeerX 10.1.1.466.2810.</li>
<li id="cite_note-16">^ Müller VC (2020), <a href="https://plato.stanford.edu/archives/win2020/entries/ethics-ai/">"Ethics of Artificial Intelligence and Robotics"</a>, in Zalta EN (ed.), <i>The Stanford Encyclopedia of Philosophy</i> (Winter 2020 ed.), Metaphysics Research Lab, Stanford University, <a href="https://web.archive.org/web/20210412140022/https://plato.stanford.edu/archives/win2020/entries/ethics-ai/">archived</a> from the original on 12 April 2021, retrieved 18 March 2021.</li>
<li id="cite_note-Jobin-2020-17">^ <sup><i>a</i></sup> <sup><i>b</i></sup> Jobin A, Ienca M, Vayena E (2 September 2020). "The global landscape of AI ethics guidelines". <i>Nature</i>. <b>1</b> (9): 389–399. arXiv:1906.11668. doi:10.1038/s42256-019-0088-2. S2CID 201827642.</li>
<li id="cite_note-18">^ Floridi L, Cowls J (2 July 2019). <a href="https://doi.org/10.1162%2F99608f92.8cd550d1">"A Unified Framework of Five Principles for AI in Society"</a>. <i>Harvard Data Science Review</i>. <b>1</b>. doi:10.1162/99608f92.8cd550d1. S2CID 198775713.</li>
<li id="cite_note-19">^ Gabriel I (14 March 2018). <a href="https://medium.com/@Ethics_Society/the-case-for-fairer-algorithms-c008a12126f8">"The case for fairer algorithms – Iason Gabriel"</a>. <i>Medium</i>. <a href="https://web.archive.org/web/20190722080401/https://medium.com/@Ethics_Society/the-case-for-fairer-algorithms-c008a12126f8">Archived</a> from the original on 22 July 2019. Retrieved 22 July 2019.</li>
<li id="cite_note-20">^ <a href="https://techcrunch.com/2016/12/10/5-unexpected-sources-of-bias-in-artificial-intelligence/">"5 unexpected sources of bias in artificial intelligence"</a>. <i>TechCrunch</i>. 10 December 2016. <a href="https://web.archive.org/web/20210318060659/https://techcrunch.com/2016/12/10/5-unexpected-sources-of-bias-in-artificial-intelligence/">Archived</a> from the original on 18 March 2021. Retrieved 22 July 2019.</li>
<li id="cite_note-21">^ Knight W. <a href="https://www.technologyreview.com/s/608986/forget-killer-robotsbias-is-the-real-ai-danger/">"Google's AI chief says forget Elon Musk's killer robots, and worry about bias in AI systems instead"</a>. <i>MIT Technology Review</i>. <a href="https://web.archive.org/web/20190704224752/https://www.technologyreview.com/s/608986/forget-killer-robotsbias-is-the-real-ai-danger/">Archived</a> from the original on 4 July 2019. Retrieved 22 July 2019.</li>
<li id="cite_note-22">^ Villasenor J (3 January 2019). <a href="https://www.brookings.edu/blog/techtank/2019/01/03/artificial-intelligence-and-bias-four-key-challenges/">"Artificial intelligence and bias: Four key challenges"</a>. <i>Brookings</i>. <a href="https://web.archive.org/web/20190722080355/https://www.brookings.edu/blog/techtank/2019/01/03/artificial-intelligence-and-bias-four-key-challenges/">Archived</a> from the original on 22 July 2019. Retrieved 22 July 2019.</li>
<li id="cite_note-23">^ Lohr S (9 February 2018). <a href="https://www.nytimes.com/2018/02/09/technology/facial-recognition-race-artificial-intelligence.html">"Facial Recognition Is Accurate, if You're a White Guy"</a>. <i>The New York Times</i>. <a href="https://web.archive.org/web/20190109131036/https://www.nytimes.com/2018/02/09/technology/facial-recognition-race-artificial-intelligence.html">Archived</a> from the original on 9 January 2019.
Retrieved <span class="nowrap">29 May</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+New+York+Times&amp;rft.atitle=Facial+Recognition+Is+Accurate%2C+if+You%27re+a+White+Guy&amp;rft.date=2018-02-09&amp;rft.aulast=Lohr&amp;rft.aufirst=Steve&amp;rft_id=https%3A%2F%2Fwww.nytimes.com%2F2018%2F02%2F09%2Ftechnology%2Ffacial-recognition-race-artificial-intelligence.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-24"><span class="mw-cite-backlink"><b><a href="#cite_ref-24">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKoeneckeNamLakeNudell2020" class="citation journal cs1"><a href="/wiki/Allison_Koenecke" title="Allison Koenecke">Koenecke A</a>, Nam A, Lake E, Nudell J, Quartey M, Mengesha Z, Toups C, Rickford JR, Jurafsky D, Goel S (7 April 2020). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7149386">"Racial disparities in automated speech recognition"</a>. <i>Proceedings of the National Academy of Sciences</i>. <b>117</b> (14): <span class="nowrap">7684–</span>7689. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2020PNAS..117.7684K">2020PNAS..117.7684K</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1073%2Fpnas.1915768117">10.1073/pnas.1915768117</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7149386">7149386</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32205437">32205437</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+National+Academy+of+Sciences&amp;rft.atitle=Racial+disparities+in+automated+speech+recognition&amp;rft.volume=117&amp;rft.issue=14&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E7684-%3C%2Fspan%3E7689&amp;rft.date=2020-04-07&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7149386%23id-name%3DPMC&amp;rft_id=info%3Apmid%2F32205437&amp;rft_id=info%3Adoi%2F10.1073%2Fpnas.1915768117&amp;rft_id=info%3Abibcode%2F2020PNAS..117.7684K&amp;rft.aulast=Koenecke&amp;rft.aufirst=Allison&amp;rft.au=Nam%2C+Andrew&amp;rft.au=Lake%2C+Emily&amp;rft.au=Nudell%2C+Joe&amp;rft.au=Quartey%2C+Minnie&amp;rft.au=Mengesha%2C+Zion&amp;rft.au=Toups%2C+Connor&amp;rft.au=Rickford%2C+John+R.&amp;rft.au=Jurafsky%2C+Dan&amp;rft.au=Goel%2C+Sharad&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7149386&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-25"><span class="mw-cite-backlink"><b><a href="#cite_ref-25">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNtoutsiFafaliosGadirajuIosifidis2020" class="citation journal cs1">Ntoutsi E, Fafalios P, Gadiraju U, Iosifidis V, Nejdl W, Vidal ME, Ruggieri S, Turini F, Papadopoulos S, Krasanakis E, Kompatsiaris I, Kinder-Kurlanda K, Wagner C, Karimi F, Fernandez M (May 2020). <a rel="nofollow" class="external text" href="https://wires.onlinelibrary.wiley.com/doi/10.1002/widm.1356">"Bias in data-driven artificial intelligence systems—An introductory survey"</a>. <i>WIREs Data Mining and Knowledge Discovery</i>. <b>10</b> (3). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1002%2Fwidm.1356">10.1002/widm.1356</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1942-4787">1942-4787</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925013154/https://wires.onlinelibrary.wiley.com/doi/10.1002/widm.1356">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-12-14</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=WIREs+Data+Mining+and+Knowledge+Discovery&amp;rft.atitle=Bias+in+data-driven+artificial+intelligence+systems%E2%80%94An+introductory+survey&amp;rft.volume=10&amp;rft.issue=3&amp;rft.date=2020-05&amp;rft_id=info%3Adoi%2F10.1002%2Fwidm.1356&amp;rft.issn=1942-4787&amp;rft.aulast=Ntoutsi&amp;rft.aufirst=Eirini&amp;rft.au=Fafalios%2C+Pavlos&amp;rft.au=Gadiraju%2C+Ujwal&amp;rft.au=Iosifidis%2C+Vasileios&amp;rft.au=Nejdl%2C+Wolfgang&amp;rft.au=Vidal%2C+Maria-Esther&amp;rft.au=Ruggieri%2C+Salvatore&amp;rft.au=Turini%2C+Franco&amp;rft.au=Papadopoulos%2C+Symeon&amp;rft.au=Krasanakis%2C+Emmanouil&amp;rft.au=Kompatsiaris%2C+Ioannis&amp;rft.au=Kinder-Kurlanda%2C+Katharina&amp;rft.au=Wagner%2C+Claudia&amp;rft.au=Karimi%2C+Fariba&amp;rft.au=Fernandez%2C+Miriam&amp;rft_id=https%3A%2F%2Fwires.onlinelibrary.wiley.com%2Fdoi%2F10.1002%2Fwidm.1356&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-26"><span class="mw-cite-backlink"><b><a href="#cite_ref-26">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.reuters.com/article/us-amazon-com-jobs-automation-insight-idUSKCN1MK08G">"Amazon scraps secret AI recruiting tool that showed bias against women"</a>. <i>Reuters</i>. 2018-10-10. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190527181625/https://www.reuters.com/article/us-amazon-com-jobs-automation-insight-idUSKCN1MK08G">Archived</a> from the original on 2019-05-27<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-05-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Reuters&amp;rft.atitle=Amazon+scraps+secret+AI+recruiting+tool+that+showed+bias+against+women&amp;rft.date=2018-10-10&amp;rft_id=https%3A%2F%2Fwww.reuters.com%2Farticle%2Fus-amazon-com-jobs-automation-insight-idUSKCN1MK08G&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-27">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFriedmanNissenbaum1996" class="citation journal cs1">Friedman B, Nissenbaum H (July 1996). <a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F230538.230561">"Bias in computer systems"</a>. <i>ACM Transactions on Information Systems</i>. <b>14</b> (3): <span class="nowrap">330–</span>347. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F230538.230561">10.1145/230538.230561</a></span>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:207195759">207195759</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=ACM+Transactions+on+Information+Systems&amp;rft.atitle=Bias+in+computer+systems&amp;rft.volume=14&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E330-%3C%2Fspan%3E347&amp;rft.date=1996-07&amp;rft_id=info%3Adoi%2F10.1145%2F230538.230561&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A207195759%23id-name%3DS2CID&amp;rft.aulast=Friedman&amp;rft.aufirst=Batya&amp;rft.au=Nissenbaum%2C+Helen&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1145%252F230538.230561&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-28"><span class="mw-cite-backlink"><b><a href="#cite_ref-28">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://techxplore.com/news/2019-07-bias-ai.html">"Eliminating bias in AI"</a>. <i>techxplore.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190725200844/https://techxplore.com/news/2019-07-bias-ai.html">Archived</a> from the original on 2019-07-25<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=techxplore.com&amp;rft.atitle=Eliminating+bias+in+AI&amp;rft_id=https%3A%2F%2Ftechxplore.com%2Fnews%2F2019-07-bias-ai.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-29"><span class="mw-cite-backlink"><b><a href="#cite_ref-29">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAbdallaWahleRuasNévéol2023" class="citation journal cs1">Abdalla M, Wahle JP, Ruas T, Névéol A, Ducel F, Mohammad S, Fort K (2023). Rogers A, Boyd-Graber J, Okazaki N (eds.). <a rel="nofollow" class="external text" href="https://aclanthology.org/2023.acl-long.734">"The Elephant in the Room: Analyzing the Presence of Big Tech in Natural Language Processing Research"</a>. <i>Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</i>. Toronto, Canada: Association for Computational Linguistics: <span class="nowrap">13141–</span>13160. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2305.02797">2305.02797</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.18653%2Fv1%2F2023.acl-long.734">10.18653/v1/2023.acl-long.734</a></span>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925013216/https://aclanthology.org/2023.acl-long.734/">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-11-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+61st+Annual+Meeting+of+the+Association+for+Computational+Linguistics+%28Volume+1%3A+Long+Papers%29&amp;rft.atitle=The+Elephant+in+the+Room%3A+Analyzing+the+Presence+of+Big+Tech+in+Natural+Language+Processing+Research&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E13141-%3C%2Fspan%3E13160&amp;rft.date=2023&amp;rft_id=info%3Aarxiv%2F2305.02797&amp;rft_id=info%3Adoi%2F10.18653%2Fv1%2F2023.acl-long.734&amp;rft.aulast=Abdalla&amp;rft.aufirst=Mohamed&amp;rft.au=Wahle%2C+Jan+Philip&amp;rft.au=Ruas%2C+Terry&amp;rft.au=N%C3%A9v%C3%A9ol%2C+Aur%C3%A9lie&amp;rft.au=Ducel%2C+Fanny&amp;rft.au=Mohammad%2C+Saif&amp;rft.au=Fort%2C+Karen&amp;rft_id=https%3A%2F%2Faclanthology.org%2F2023.acl-long.734&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-30">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFOlson" class="citation web cs1">Olson P. <a rel="nofollow" class="external text" href="https://www.forbes.com/sites/parmyolson/2018/03/13/google-deepmind-ai-machine-learning-bias/">"Google's DeepMind Has An Idea For Stopping Biased AI"</a>. <i>Forbes</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726082959/https://www.forbes.com/sites/parmyolson/2018/03/13/google-deepmind-ai-machine-learning-bias/">Archived</a> from the original on 2019-07-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Forbes&amp;rft.atitle=Google%27s+DeepMind+Has+An+Idea+For+Stopping+Biased+AI&amp;rft.aulast=Olson&amp;rft.aufirst=Parmy&amp;rft_id=https%3A%2F%2Fwww.forbes.com%2Fsites%2Fparmyolson%2F2018%2F03%2F13%2Fgoogle-deepmind-ai-machine-learning-bias%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-31"><span class="mw-cite-backlink"><b><a href="#cite_ref-31">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://developers.google.com/machine-learning/fairness-overview/">"Machine Learning Fairness | ML Fairness"</a>. <i>Google Developers</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190810004754/https://developers.google.com/machine-learning/fairness-overview/">Archived</a> from the original on 2019-08-10<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Google+Developers&amp;rft.atitle=Machine+Learning+Fairness+%7C+ML+Fairness&amp;rft_id=https%3A%2F%2Fdevelopers.google.com%2Fmachine-learning%2Ffairness-overview%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.research.ibm.com/5-in-5/ai-and-bias/">"AI and bias – IBM Research – US"</a>. <i>www.research.ibm.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190717175957/http://www.research.ibm.com/5-in-5/ai-and-bias/">Archived</a> from the original on 2019-07-17<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.research.ibm.com&amp;rft.atitle=AI+and+bias+%E2%80%93+IBM+Research+%E2%80%93+US&amp;rft_id=https%3A%2F%2Fwww.research.ibm.com%2F5-in-5%2Fai-and-bias%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-33">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBenderFriedman2018" class="citation journal cs1">Bender EM, Friedman B (December 2018). <a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Ftacl_a_00041">"Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science"</a>. <i>Transactions of the Association for Computational Linguistics</i>. <b>6</b>: <span class="nowrap">587–</span>604. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Ftacl_a_00041">10.1162/tacl_a_00041</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Transactions+of+the+Association+for+Computational+Linguistics&amp;rft.atitle=Data+Statements+for+Natural+Language+Processing%3A+Toward+Mitigating+System+Bias+and+Enabling+Better+Science&amp;rft.volume=6&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E587-%3C%2Fspan%3E604&amp;rft.date=2018-12&amp;rft_id=info%3Adoi%2F10.1162%2Ftacl_a_00041&amp;rft.aulast=Bender&amp;rft.aufirst=Emily+M.&amp;rft.au=Friedman%2C+Batya&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1162%252Ftacl_a_00041&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-34">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGebruMorgensternVecchioneVaughan2018" class="citation arxiv cs1">Gebru T, Morgenstern J, Vecchione B, Vaughan JW, <a href="/wiki/Hanna_Wallach" title="Hanna Wallach">Wallach H</a>, Daumé III H, Crawford K (2018). "Datasheets for Datasets". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1803.09010">1803.09010</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.DB">cs.DB</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Datasheets+for+Datasets&amp;rft.date=2018&amp;rft_id=info%3Aarxiv%2F1803.09010&amp;rft.aulast=Gebru&amp;rft.aufirst=Timnit&amp;rft.au=Morgenstern%2C+Jamie&amp;rft.au=Vecchione%2C+Briana&amp;rft.au=Vaughan%2C+Jennifer+Wortman&amp;rft.au=Wallach%2C+Hanna&amp;rft.au=Daum%C3%A9+III%2C+Hal&amp;rft.au=Crawford%2C+Kate&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-35">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPery2021" class="citation web cs1">Pery A (2021-10-06). <a rel="nofollow" class="external text" href="https://deepai.org/publication/trustworthy-artificial-intelligence-and-process-mining-challenges-and-opportunities">"Trustworthy Artificial Intelligence and Process Mining: Challenges and Opportunities"</a>. <i>DeepAI</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220218200006/https://deepai.org/publication/trustworthy-artificial-intelligence-and-process-mining-challenges-and-opportunities">Archived</a> from the original on 2022-02-18<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2022-02-18</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=DeepAI&amp;rft.atitle=Trustworthy+Artificial+Intelligence+and+Process+Mining%3A+Challenges+and+Opportunities&amp;rft.date=2021-10-06&amp;rft.aulast=Pery&amp;rft.aufirst=Andrew&amp;rft_id=https%3A%2F%2Fdeepai.org%2Fpublication%2Ftrustworthy-artificial-intelligence-and-process-mining-challenges-and-opportunities&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-36"><span class="mw-cite-backlink"><b><a href="#cite_ref-36">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKnight" class="citation web cs1">Knight W. <a rel="nofollow" class="external text" href="https://www.technologyreview.com/s/608986/forget-killer-robotsbias-is-the-real-ai-danger/">"Google's AI chief says forget Elon Musk's killer robots, and worry about bias in AI systems instead"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190704224752/https://www.technologyreview.com/s/608986/forget-killer-robotsbias-is-the-real-ai-danger/">Archived</a> from the original on 2019-07-04<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=MIT+Technology+Review&amp;rft.atitle=Google%27s+AI+chief+says+forget+Elon+Musk%27s+killer+robots%2C+and+worry+about+bias+in+AI+systems+instead&amp;rft.aulast=Knight&amp;rft.aufirst=Will&amp;rft_id=https%3A%2F%2Fwww.technologyreview.com%2Fs%2F608986%2Fforget-killer-robotsbias-is-the-real-ai-danger%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-37">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://map.ai-global.org/">"Where in the World is AI? Responsible &amp; Unethical AI Examples"</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201031034143/https://map.ai-global.org/">Archived</a> from the original on 2020-10-31<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-10-28</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Where+in+the+World+is+AI%3F+Responsible+%26+Unethical+AI+Examples&amp;rft_id=https%3A%2F%2Fmap.ai-global.org%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-38">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRuggieriAlvarezPugnanaState2023" class="citation journal cs1">Ruggieri S, Alvarez JM, Pugnana A, State L, Turini F (2023-06-26). <a rel="nofollow" class="external text" href="https://doi.org/10.1609%2Faaai.v37i13.26798">"Can We Trust Fair-AI?"</a>. 
<i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. <b>37</b> (13). Association for the Advancement of Artificial Intelligence (AAAI): <span class="nowrap">15421–</span>15430. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1609%2Faaai.v37i13.26798">10.1609/aaai.v37i13.26798</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/11384%2F136444">11384/136444</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2374-3468">2374-3468</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:259678387">259678387</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+AAAI+Conference+on+Artificial+Intelligence&amp;rft.atitle=Can+We+Trust+Fair-AI%3F&amp;rft.volume=37&amp;rft.issue=13&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E15421-%3C%2Fspan%3E15430&amp;rft.date=2023-06-26&amp;rft_id=info%3Ahdl%2F11384%2F136444&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A259678387%23id-name%3DS2CID&amp;rft.issn=2374-3468&amp;rft_id=info%3Adoi%2F10.1609%2Faaai.v37i13.26798&amp;rft.aulast=Ruggieri&amp;rft.aufirst=Salvatore&amp;rft.au=Alvarez%2C+Jose+M.&amp;rft.au=Pugnana%2C+Andrea&amp;rft.au=State%2C+Laura&amp;rft.au=Turini%2C+Franco&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1609%252Faaai.v37i13.26798&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Buyl_De_Bie_p.2-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-Buyl_De_Bie_p.2_39-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBuylDe_Bie2022" class="citation journal cs1">Buyl M, De Bie T (2022). "Inherent Limitations of AI Fairness". <i>Communications of the ACM</i>. <b>67</b> (2): <span class="nowrap">48–</span>55. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2212.06495">2212.06495</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F3624700">10.1145/3624700</a>. 
<a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<a rel="nofollow" class="external text" href="https://hdl.handle.net/1854%2FLU-01GMNH04RGNVWJ730BJJXGCY99">1854/LU-01GMNH04RGNVWJ730BJJXGCY99</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Communications+of+the+ACM&amp;rft.atitle=Inherent+Limitations+of+AI+Fairness&amp;rft.volume=67&amp;rft.issue=2&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E48-%3C%2Fspan%3E55&amp;rft.date=2022&amp;rft_id=info%3Aarxiv%2F2212.06495&amp;rft_id=info%3Ahdl%2F1854%2FLU-01GMNH04RGNVWJ730BJJXGCY99&amp;rft_id=info%3Adoi%2F10.1145%2F3624700&amp;rft.aulast=Buyl&amp;rft.aufirst=Maarten&amp;rft.au=De+Bie%2C+Tijl&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-40">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCastelnovoInverardiNaninoPenco2023" class="citation arxiv cs1">Castelnovo A, Inverardi N, Nanino G, Penco IG, Regoli D (2023). "Fair Enough? A map of the current limitations of the requirements to have "fair" algorithms". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2311.12435">2311.12435</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.AI">cs.AI</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Fair+Enough%3F+A+map+of+the+current+limitations+of+the+requirements+to+have+%22fair%22+algorithms&amp;rft.date=2023&amp;rft_id=info%3Aarxiv%2F2311.12435&amp;rft.aulast=Castelnovo&amp;rft.aufirst=Alessandro&amp;rft.au=Inverardi%2C+Nicole&amp;rft.au=Nanino%2C+Gabriele&amp;rft.au=Penco%2C+Ilaria+Giuseppina&amp;rft.au=Regoli%2C+Daniele&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-41">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFederspielMitchellAsokanUmana2023" class="citation journal cs1">Federspiel F, Mitchell R, Asokan A, Umana C, McCoy D (May 2023). <a rel="nofollow" class="external text" href="https://dx.doi.org/10.1136/bmjgh-2022-010435">"Threats by artificial intelligence to human health and human existence"</a>. <i>BMJ Global Health</i>. <b>8</b> (5): e010435. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1136%2Fbmjgh-2022-010435">10.1136/bmjgh-2022-010435</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2059-7908">2059-7908</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10186390">10186390</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/37160371">37160371</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925013122/https://gh.bmj.com/content/8/5/e010435">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-04-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BMJ+Global+Health&amp;rft.atitle=Threats+by+artificial+intelligence+to+human+health+and+human+existence&amp;rft.volume=8&amp;rft.issue=5&amp;rft.pages=e010435&amp;rft.date=2023-05&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10186390%23id-name%3DPMC&amp;rft.issn=2059-7908&amp;rft_id=info%3Apmid%2F37160371&amp;rft_id=info%3Adoi%2F10.1136%2Fbmjgh-2022-010435&amp;rft.aulast=Federspiel&amp;rft.aufirst=Frederik&amp;rft.au=Mitchell%2C+Ruth&amp;rft.au=Asokan%2C+Asha&amp;rft.au=Umana%2C+Carlos&amp;rft.au=McCoy%2C+David&amp;rft_id=http%3A%2F%2Fdx.doi.org%2F10.1136%2Fbmjgh-2022-010435&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Spindler-20232-42"><span class="mw-cite-backlink">^ <a href="#cite_ref-Spindler-20232_42-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Spindler-20232_42-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSpindler2023" class="citation cs2">Spindler G (2023), <a rel="nofollow" class="external text" href="https://dx.doi.org/10.5771/9783748942030-41">"Different approaches for liability of Artificial Intelligence – Pros and Cons"</a>, <i>Liability for AI</i>, Nomos Verlagsgesellschaft mbH &amp; Co. 
KG, pp.&#160;<span class="nowrap">41–</span>96, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.5771%2F9783748942030-41">10.5771/9783748942030-41</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-3-7489-4203-0" title="Special:BookSources/978-3-7489-4203-0"><bdi>978-3-7489-4203-0</bdi></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925013122/https://www.nomos-elibrary.de/10.5771/9783748942030/liability-for-ai?page=1">archived</a> from the original on 2024-09-25<span class="reference-accessdate">, retrieved <span class="nowrap">2023-12-14</span></span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Liability+for+AI&amp;rft.atitle=Different+approaches+for+liability+of+Artificial+Intelligence+%E2%80%93+Pros+and+Cons&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E41-%3C%2Fspan%3E96&amp;rft.date=2023&amp;rft_id=info%3Adoi%2F10.5771%2F9783748942030-41&amp;rft.isbn=978-3-7489-4203-0&amp;rft.aulast=Spindler&amp;rft.aufirst=Gerald&amp;rft_id=http%3A%2F%2Fdx.doi.org%2F10.5771%2F9783748942030-41&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-43">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFManyika2022" class="citation journal cs1">Manyika J (2022). <a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fdaed_e_01897">"Getting AI Right: Introductory Notes on AI &amp; Society"</a>. <i>Daedalus</i>. <b>151</b> (2): <span class="nowrap">5–</span>27. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fdaed_e_01897">10.1162/daed_e_01897</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0011-5266">0011-5266</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Daedalus&amp;rft.atitle=Getting+AI+Right%3A+Introductory+Notes+on+AI+%26+Society&amp;rft.volume=151&amp;rft.issue=2&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E5-%3C%2Fspan%3E27&amp;rft.date=2022&amp;rft_id=info%3Adoi%2F10.1162%2Fdaed_e_01897&amp;rft.issn=0011-5266&amp;rft.aulast=Manyika&amp;rft.aufirst=James&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1162%252Fdaed_e_01897&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-44"><span class="mw-cite-backlink"><b><a href="#cite_ref-44">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFImranPosokhovaQureshiMasood2020" class="citation journal cs1">Imran A, Posokhova I, Qureshi HN, Masood U, Riaz MS, Ali K, John CN, Hussain MI, Nabeel M (2020-01-01). 
<a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7318970">"AI4COVID-19: AI enabled preliminary diagnosis for COVID-19 from cough samples via an app"</a>. <i>Informatics in Medicine Unlocked</i>. <b>20</b>: 100378. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.imu.2020.100378">10.1016/j.imu.2020.100378</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2352-9148">2352-9148</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7318970">7318970</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32839734">32839734</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Informatics+in+Medicine+Unlocked&amp;rft.atitle=AI4COVID-19%3A+AI+enabled+preliminary+diagnosis+for+COVID-19+from+cough+samples+via+an+app&amp;rft.volume=20&amp;rft.pages=100378&amp;rft.date=2020-01-01&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7318970%23id-name%3DPMC&amp;rft.issn=2352-9148&amp;rft_id=info%3Apmid%2F32839734&amp;rft_id=info%3Adoi%2F10.1016%2Fj.imu.2020.100378&amp;rft.aulast=Imran&amp;rft.aufirst=Ali&amp;rft.au=Posokhova%2C+Iryna&amp;rft.au=Qureshi%2C+Haneya+N.&amp;rft.au=Masood%2C+Usama&amp;rft.au=Riaz%2C+Muhammad+Sajid&amp;rft.au=Ali%2C+Kamran&amp;rft.au=John%2C+Charles+N.&amp;rft.au=Hussain%2C+MD+Iftikhar&amp;rft.au=Nabeel%2C+Muhammad&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7318970&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-45">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCirilloCatuara-SolarzMoreyGuney2020" class="citation journal cs1">Cirillo D, Catuara-Solarz S, Morey C, Guney E, Subirats L, Mellino S, Gigante A, Valencia A, Rementeria MJ, Chadha AS, Mavridis N (2020-06-01). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7264169">"Sex and gender differences and biases in artificial intelligence for biomedicine and healthcare"</a>. <i>npj Digital Medicine</i>. <b>3</b> (1): 81. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41746-020-0288-5">10.1038/s41746-020-0288-5</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2398-6352">2398-6352</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7264169">7264169</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32529043">32529043</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=npj+Digital+Medicine&amp;rft.atitle=Sex+and+gender+differences+and+biases+in+artificial+intelligence+for+biomedicine+and+healthcare&amp;rft.volume=3&amp;rft.issue=1&amp;rft.pages=81&amp;rft.date=2020-06-01&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7264169%23id-name%3DPMC&amp;rft.issn=2398-6352&amp;rft_id=info%3Apmid%2F32529043&amp;rft_id=info%3Adoi%2F10.1038%2Fs41746-020-0288-5&amp;rft.aulast=Cirillo&amp;rft.aufirst=Davide&amp;rft.au=Catuara-Solarz%2C+Silvina&amp;rft.au=Morey%2C+Czuee&amp;rft.au=Guney%2C+Emre&amp;rft.au=Subirats%2C+Laia&amp;rft.au=Mellino%2C+Simona&amp;rft.au=Gigante%2C+Annalisa&amp;rft.au=Valencia%2C+Alfonso&amp;rft.au=Rementeria%2C+Mar%C3%ADa+Jos%C3%A9&amp;rft.au=Chadha%2C+Antonella+Santuccione&amp;rft.au=Mavridis%2C+Nikolaos&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7264169&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-46">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChristian2021" class="citation book cs1">Christian B (2021). <i>The alignment problem: machine learning and human values</i> (First published as a Norton paperback&#160;ed.). New York, NY: W. W. Norton &amp; Company. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-393-86833-3" title="Special:BookSources/978-0-393-86833-3"><bdi>978-0-393-86833-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+alignment+problem%3A+machine+learning+and+human+values&amp;rft.place=New+York%2C+NY&amp;rft.edition=First+published+as+a+Norton+paperback&amp;rft.pub=W.+W.+Norton+%26+Company&amp;rft.date=2021&amp;rft.isbn=978-0-393-86833-3&amp;rft.aulast=Christian&amp;rft.aufirst=Brian&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-47">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNtoutsiFafaliosGadirajuIosifidis2020" class="citation journal cs1">Ntoutsi E, Fafalios P, Gadiraju U, Iosifidis V, Nejdl W, Vidal ME, Ruggieri S, Turini F, Papadopoulos S, Krasanakis E, Kompatsiaris I, Kinder-Kurlanda K, Wagner C, Karimi F, Fernandez M (May 2020). <a rel="nofollow" class="external text" href="https://doi.org/10.1002%2Fwidm.1356">"Bias in data-driven artificial intelligence systems—An introductory survey"</a>. <i>WIREs Data Mining and Knowledge Discovery</i>. <b>10</b> (3). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1002%2Fwidm.1356">10.1002/widm.1356</a></span>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1942-4787">1942-4787</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=WIREs+Data+Mining+and+Knowledge+Discovery&amp;rft.atitle=Bias+in+data-driven+artificial+intelligence+systems%E2%80%94An+introductory+survey&amp;rft.volume=10&amp;rft.issue=3&amp;rft.date=2020-05&amp;rft_id=info%3Adoi%2F10.1002%2Fwidm.1356&amp;rft.issn=1942-4787&amp;rft.aulast=Ntoutsi&amp;rft.aufirst=Eirini&amp;rft.au=Fafalios%2C+Pavlos&amp;rft.au=Gadiraju%2C+Ujwal&amp;rft.au=Iosifidis%2C+Vasileios&amp;rft.au=Nejdl%2C+Wolfgang&amp;rft.au=Vidal%2C+Maria-Esther&amp;rft.au=Ruggieri%2C+Salvatore&amp;rft.au=Turini%2C+Franco&amp;rft.au=Papadopoulos%2C+Symeon&amp;rft.au=Krasanakis%2C+Emmanouil&amp;rft.au=Kompatsiaris%2C+Ioannis&amp;rft.au=Kinder-Kurlanda%2C+Katharina&amp;rft.au=Wagner%2C+Claudia&amp;rft.au=Karimi%2C+Fariba&amp;rft.au=Fernandez%2C+Miriam&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1002%252Fwidm.1356&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Luo-2023-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-Luo-2023_48-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLuoPuettSmith2023" class="citation arxiv cs1">Luo Q, Puett MJ, Smith MD (2023-03-28). "A Perspectival Mirror of the Elephant: Investigating Language Bias on Google, ChatGPT, Wikipedia, and YouTube". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2303.16281v2">2303.16281v2</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CY">cs.CY</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=A+Perspectival+Mirror+of+the+Elephant%3A+Investigating+Language+Bias+on+Google%2C+ChatGPT%2C+Wikipedia%2C+and+YouTube&amp;rft.date=2023-03-28&amp;rft_id=info%3Aarxiv%2F2303.16281v2&amp;rft.aulast=Luo&amp;rft.aufirst=Queenie&amp;rft.au=Puett%2C+Michael+J.&amp;rft.au=Smith%2C+Michael+D.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-49"><span class="mw-cite-backlink"><b><a href="#cite_ref-49">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBuskerChoenniShoae_Bargh2023" class="citation book cs1">Busker T, Choenni S, Shoae Bargh M (2023-11-20). "Stereotypes in ChatGPT: An empirical study". <i>Proceedings of the 16th International Conference on Theory and Practice of Electronic Governance</i>. ICEGOV '23. New York, NY, USA: Association for Computing Machinery. pp.&#160;<span class="nowrap">24–</span>32. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F3614321.3614325">10.1145/3614321.3614325</a></span>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/979-8-4007-0742-1" title="Special:BookSources/979-8-4007-0742-1"><bdi>979-8-4007-0742-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Stereotypes+in+ChatGPT%3A+An+empirical+study&amp;rft.btitle=Proceedings+of+the+16th+International+Conference+on+Theory+and+Practice+of+Electronic+Governance&amp;rft.place=New+York%2C+NY%2C+USA&amp;rft.series=ICEGOV+%2723&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E24-%3C%2Fspan%3E32&amp;rft.pub=Association+for+Computing+Machinery&amp;rft.date=2023-11-20&amp;rft_id=info%3Adoi%2F10.1145%2F3614321.3614325&amp;rft.isbn=979-8-4007-0742-1&amp;rft.aulast=Busker&amp;rft.aufirst=Tony&amp;rft.au=Choenni%2C+Sunil&amp;rft.au=Shoae+Bargh%2C+Mortaza&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-50"><span class="mw-cite-backlink"><b><a href="#cite_ref-50">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKotekDockumSun2023" class="citation book cs1">Kotek H, Dockum R, Sun D (2023-11-05). "Gender bias and stereotypes in Large Language Models". <i>Proceedings of the ACM Collective Intelligence Conference</i>. CI '23. New York, NY, USA: Association for Computing Machinery. pp.&#160;<span class="nowrap">12–</span>24. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2308.14921">2308.14921</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F3582269.3615599">10.1145/3582269.3615599</a></span>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/979-8-4007-0113-9" title="Special:BookSources/979-8-4007-0113-9"><bdi>979-8-4007-0113-9</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Gender+bias+and+stereotypes+in+Large+Language+Models&amp;rft.btitle=Proceedings+of+the+ACM+Collective+Intelligence+Conference&amp;rft.place=New+York%2C+NY%2C+USA&amp;rft.series=CI+%2723&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E12-%3C%2Fspan%3E24&amp;rft.pub=Association+for+Computing+Machinery&amp;rft.date=2023-11-05&amp;rft_id=info%3Aarxiv%2F2308.14921&amp;rft_id=info%3Adoi%2F10.1145%2F3582269.3615599&amp;rft.isbn=979-8-4007-0113-9&amp;rft.aulast=Kotek&amp;rft.aufirst=Hadas&amp;rft.au=Dockum%2C+Rikker&amp;rft.au=Sun%2C+David&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-51"><span class="mw-cite-backlink"><b><a href="#cite_ref-51">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFederspielMitchellAsokanUmana2023" class="citation journal cs1">Federspiel F, Mitchell R, Asokan A, Umana C, McCoy D (May 2023). 
<a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10186390">"Threats by artificial intelligence to human health and human existence"</a>. <i>BMJ Global Health</i>. <b>8</b> (5): e010435. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1136%2Fbmjgh-2022-010435">10.1136/bmjgh-2022-010435</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2059-7908">2059-7908</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10186390">10186390</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/37160371">37160371</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BMJ+Global+Health&amp;rft.atitle=Threats+by+artificial+intelligence+to+human+health+and+human+existence&amp;rft.volume=8&amp;rft.issue=5&amp;rft.pages=e010435&amp;rft.date=2023-05&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10186390%23id-name%3DPMC&amp;rft.issn=2059-7908&amp;rft_id=info%3Apmid%2F37160371&amp;rft_id=info%3Adoi%2F10.1136%2Fbmjgh-2022-010435&amp;rft.aulast=Federspiel&amp;rft.aufirst=Frederik&amp;rft.au=Mitchell%2C+Ruth&amp;rft.au=Asokan%2C+Asha&amp;rft.au=Umana%2C+Carlos&amp;rft.au=McCoy%2C+David&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10186390&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-52"><span class="mw-cite-backlink"><b><a href="#cite_ref-52">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFengParkLiuTsvetkov2023" class="citation journal cs1">Feng S, Park CY, Liu Y, Tsvetkov Y (July 2023). Rogers A, Boyd-Graber J, Okazaki N (eds.). <a rel="nofollow" class="external text" href="https://aclanthology.org/2023.acl-long.656">"From Pretraining Data to Language Models to Downstream Tasks: Tracking the Trails of Political Biases Leading to Unfair NLP Models"</a>. <i>Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</i>. Toronto, Canada: Association for Computational Linguistics: <span class="nowrap">11737–</span>11762. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2305.08283">2305.08283</a></span>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.18653%2Fv1%2F2023.acl-long.656">10.18653/v1/2023.acl-long.656</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+61st+Annual+Meeting+of+the+Association+for+Computational+Linguistics+%28Volume+1%3A+Long+Papers%29&amp;rft.atitle=From+Pretraining+Data+to+Language+Models+to+Downstream+Tasks%3A+Tracking+the+Trails+of+Political+Biases+Leading+to+Unfair+NLP+Models&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E11737-%3C%2Fspan%3E11762&amp;rft.date=2023-07&amp;rft_id=info%3Aarxiv%2F2305.08283&amp;rft_id=info%3Adoi%2F10.18653%2Fv1%2F2023.acl-long.656&amp;rft.aulast=Feng&amp;rft.aufirst=Shangbin&amp;rft.au=Park%2C+Chan+Young&amp;rft.au=Liu%2C+Yuhan&amp;rft.au=Tsvetkov%2C+Yulia&amp;rft_id=https%3A%2F%2Faclanthology.org%2F2023.acl-long.656&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-53"><span class="mw-cite-backlink"><b><a href="#cite_ref-53">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZhouTan2023" class="citation journal cs1">Zhou K, Tan C (December 2023). Bouamor H, Pino J, Bali K (eds.). <a rel="nofollow" class="external text" href="https://aclanthology.org/2023.findings-emnlp.696">"Entity-Based Evaluation of Political Bias in Automatic Summarization"</a>. <i>Findings of the Association for Computational Linguistics: EMNLP 2023</i>. Singapore: Association for Computational Linguistics: <span class="nowrap">10374–</span>10386. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2305.02321">2305.02321</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.18653%2Fv1%2F2023.findings-emnlp.696">10.18653/v1/2023.findings-emnlp.696</a></span>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240424141927/https://aclanthology.org/2023.findings-emnlp.696/">Archived</a> from the original on 2024-04-24<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-12-25</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Findings+of+the+Association+for+Computational+Linguistics%3A+EMNLP+2023&amp;rft.atitle=Entity-Based+Evaluation+of+Political+Bias+in+Automatic+Summarization&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E10374-%3C%2Fspan%3E10386&amp;rft.date=2023-12&amp;rft_id=info%3Aarxiv%2F2305.02321&amp;rft_id=info%3Adoi%2F10.18653%2Fv1%2F2023.findings-emnlp.696&amp;rft.aulast=Zhou&amp;rft.aufirst=Karen&amp;rft.au=Tan%2C+Chenhao&amp;rft_id=https%3A%2F%2Faclanthology.org%2F2023.findings-emnlp.696&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-54"><span class="mw-cite-backlink"><b><a href="#cite_ref-54">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChengDurmusJurafsky2023" class="citation arxiv cs1">Cheng M, Durmus E, Jurafsky D (2023-05-29). "Marked Personas: Using Natural Language Prompts to Measure Stereotypes in Language Models". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2305.18189v1">2305.18189v1</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Marked+Personas%3A+Using+Natural+Language+Prompts+to+Measure+Stereotypes+in+Language+Models&amp;rft.date=2023-05-29&amp;rft_id=info%3Aarxiv%2F2305.18189v1&amp;rft.aulast=Cheng&amp;rft.aufirst=Myra&amp;rft.au=Durmus%2C+Esin&amp;rft.au=Jurafsky%2C+Dan&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-55"><span class="mw-cite-backlink"><b><a href="#cite_ref-55">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHammond2023" class="citation web cs1">Hammond G (27 December 2023). <a rel="nofollow" class="external text" href="https://arstechnica.com/ai/2023/12/big-tech-is-spending-more-than-vc-firms-on-ai-startups/">"Big Tech is spending more than VC firms on AI startups"</a>. <i>Ars Technica</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20240110195706/https://arstechnica.com/ai/2023/12/big-tech-is-spending-more-than-vc-firms-on-ai-startups/">Archived</a> from the original on Jan 10, 2024.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Ars+Technica&amp;rft.atitle=Big+Tech+is+spending+more+than+VC+firms+on+AI+startups&amp;rft.date=2023-12-27&amp;rft.aulast=Hammond&amp;rft.aufirst=George&amp;rft_id=https%3A%2F%2Farstechnica.com%2Fai%2F2023%2F12%2Fbig-tech-is-spending-more-than-vc-firms-on-ai-startups%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-56"><span class="mw-cite-backlink"><b><a href="#cite_ref-56">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWong2023" class="citation web cs1">Wong M (24 October 2023). <span class="id-lock-subscription" title="Paid subscription required"><a rel="nofollow" class="external text" href="https://www.theatlantic.com/technology/archive/2023/10/big-ai-silicon-valley-dominance/675752/">"The Future of AI Is GOMA"</a></span>. <i>The Atlantic</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240105020744/https://www.theatlantic.com/technology/archive/2023/10/big-ai-silicon-valley-dominance/675752/">Archived</a> from the original on Jan 5, 2024.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Atlantic&amp;rft.atitle=The+Future+of+AI+Is+GOMA&amp;rft.date=2023-10-24&amp;rft.aulast=Wong&amp;rft.aufirst=Matteo&amp;rft_id=https%3A%2F%2Fwww.theatlantic.com%2Ftechnology%2Farchive%2F2023%2F10%2Fbig-ai-silicon-valley-dominance%2F675752%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-57"><span class="mw-cite-backlink"><b><a href="#cite_ref-57">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><span class="id-lock-subscription" title="Paid subscription required"><a rel="nofollow" class="external text" href="https://www.economist.com/business/2023/03/26/big-tech-and-the-pursuit-of-ai-dominance">"Big tech and the pursuit of AI dominance"</a></span>. <i>The Economist</i>. Mar 26, 2023. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231229021351/https://www.economist.com/business/2023/03/26/big-tech-and-the-pursuit-of-ai-dominance">Archived</a> from the original on Dec 29, 2023.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Economist&amp;rft.atitle=Big+tech+and+the+pursuit+of+AI+dominance&amp;rft.date=2023-03-26&amp;rft_id=https%3A%2F%2Fwww.economist.com%2Fbusiness%2F2023%2F03%2F26%2Fbig-tech-and-the-pursuit-of-ai-dominance&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-58"><span class="mw-cite-backlink"><b><a href="#cite_ref-58">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFung2023" class="citation news cs1">Fung B (19 December 2023). 
<a rel="nofollow" class="external text" href="https://www.cnn.com/2023/12/19/tech/cloud-competition-and-ai/index.html">"Where the battle to dominate AI may be won"</a>. <i>CNN Business</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240113053332/https://www.cnn.com/2023/12/19/tech/cloud-competition-and-ai/index.html">Archived</a> from the original on Jan 13, 2024.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=CNN+Business&amp;rft.atitle=Where+the+battle+to+dominate+AI+may+be+won&amp;rft.date=2023-12-19&amp;rft.aulast=Fung&amp;rft.aufirst=Brian&amp;rft_id=https%3A%2F%2Fwww.cnn.com%2F2023%2F12%2F19%2Ftech%2Fcloud-competition-and-ai%2Findex.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-59"><span class="mw-cite-backlink"><b><a href="#cite_ref-59">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMetz2023" class="citation news cs1">Metz C (5 July 2023). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2023/07/05/business/artificial-intelligence-power-data-centers.html">"In the Age of A.I., Tech's Little Guys Need Big Friends"</a>. <i>The New York Times</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240708214644/https://www.nytimes.com/2023/07/05/business/artificial-intelligence-power-data-centers.html">Archived</a> from the original on 8 July 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">17 July</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+New+York+Times&amp;rft.atitle=In+the+Age+of+A.I.%2C+Tech%27s+Little+Guys+Need+Big+Friends&amp;rft.date=2023-07-05&amp;rft.aulast=Metz&amp;rft.aufirst=Cade&amp;rft_id=https%3A%2F%2Fwww.nytimes.com%2F2023%2F07%2F05%2Fbusiness%2Fartificial-intelligence-power-data-centers.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-AGI-08a-60"><span class="mw-cite-backlink"><b><a href="#cite_ref-AGI-08a_60-0">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.ssec.wisc.edu/~billh/g/hibbard_agi_workshop.pdf">Open Source AI.</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160304054930/http://www.ssec.wisc.edu/~billh/g/hibbard_agi_workshop.pdf">Archived</a> 2016-03-04 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> Bill Hibbard. 2008 <a rel="nofollow" class="external text" href="https://agi-conf.org/2008/papers/">proceedings</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925013117/https://agi-conf.org/2008/papers/">Archived</a> 2024-09-25 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> of the First Conference on Artificial General Intelligence, eds. Pei Wang, Ben Goertzel, and Stan Franklin.</span> </li> <li id="cite_note-61"><span class="mw-cite-backlink"><b><a href="#cite_ref-61">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFStewartMelton" class="citation web cs1">Stewart A, Melton M. 
<a rel="nofollow" class="external text" href="https://www.businessinsider.com/hugging-face-open-source-ai-approach-2023-12">"Hugging Face CEO says he's focused on building a 'sustainable model' for the $4.5 billion open-source-AI startup"</a>. <i>Business Insider</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925013220/https://www.businessinsider.com/hugging-face-open-source-ai-approach-2023-12">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-04-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Business+Insider&amp;rft.atitle=Hugging+Face+CEO+says+he%27s+focused+on+building+a+%27sustainable+model%27+for+the+%244.5+billion+open-source-AI+startup&amp;rft.aulast=Stewart&amp;rft.aufirst=Ashley&amp;rft.au=Melton%2C+Monica&amp;rft_id=https%3A%2F%2Fwww.businessinsider.com%2Fhugging-face-open-source-ai-approach-2023-12&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-62"><span class="mw-cite-backlink"><b><a href="#cite_ref-62">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.technologyreview.com/2023/05/12/1072950/open-source-ai-google-openai-eleuther-meta/">"The open-source AI boom is built on Big Tech's handouts. How long will it last?"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240105005257/https://www.technologyreview.com/2023/05/12/1072950/open-source-ai-google-openai-eleuther-meta/">Archived</a> from the original on 2024-01-05<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-04-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=MIT+Technology+Review&amp;rft.atitle=The+open-source+AI+boom+is+built+on+Big+Tech%27s+handouts.+How+long+will+it+last%3F&amp;rft_id=https%3A%2F%2Fwww.technologyreview.com%2F2023%2F05%2F12%2F1072950%2Fopen-source-ai-google-openai-eleuther-meta%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-63"><span class="mw-cite-backlink"><b><a href="#cite_ref-63">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYao2024" class="citation news cs1">Yao D (February 21, 2024). <a rel="nofollow" class="external text" href="https://aibusiness.com/nlp/google-unveils-open-source-models-to-compete-against-meta">"Google Unveils Open Source Models to Rival Meta, Mistral"</a>. 
<i>AI Business</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=AI+Business&amp;rft.atitle=Google+Unveils+Open+Source+Models+to+Rival+Meta%2C+Mistral&amp;rft.date=2024-02-21&amp;rft.aulast=Yao&amp;rft.aufirst=Deborah&amp;rft_id=https%3A%2F%2Faibusiness.com%2Fnlp%2Fgoogle-unveils-open-source-models-to-compete-against-meta&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-P7001-64"><span class="mw-cite-backlink"><b><a href="#cite_ref-P7001_64-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="p7001" class="citation book cs1"><a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/9726144"><i>7001-2021 - IEEE Standard for Transparency of Autonomous Systems</i></a>. IEEE. 4 March 2022. pp.&#160;<span class="nowrap">1–</span>54. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FIEEESTD.2022.9726144">10.1109/IEEESTD.2022.9726144</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-1-5044-8311-7" title="Special:BookSources/978-1-5044-8311-7"><bdi>978-1-5044-8311-7</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:252589405">252589405</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230726175434/https://ieeexplore.ieee.org/document/9726144">Archived</a> from the original on 26 July 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">9 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=7001-2021+-+IEEE+Standard+for+Transparency+of+Autonomous+Systems&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E1-%3C%2Fspan%3E54&amp;rft.pub=IEEE&amp;rft.date=2022-03-04&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A252589405%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2FIEEESTD.2022.9726144&amp;rft.isbn=978-1-5044-8311-7&amp;rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F9726144&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span>.</span> </li> <li id="cite_note-65"><span class="mw-cite-backlink"><b><a href="#cite_ref-65">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKamilaJasrotia2023" class="citation journal cs1">Kamila MK, Jasrotia SS (2023-01-01). <a rel="nofollow" class="external text" href="https://doi.org/10.1108/IJOES-05-2023-0107">"Ethical issues in the development of artificial intelligence: recognizing the risks"</a>. <i>International Journal of Ethics and Systems</i>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1108%2FIJOES-05-2023-0107">10.1108/IJOES-05-2023-0107</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2514-9369">2514-9369</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:259614124">259614124</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=International+Journal+of+Ethics+and+Systems&amp;rft.atitle=Ethical+issues+in+the+development+of+artificial+intelligence%3A+recognizing+the+risks&amp;rft.date=2023-01-01&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A259614124%23id-name%3DS2CID&amp;rft.issn=2514-9369&amp;rft_id=info%3Adoi%2F10.1108%2FIJOES-05-2023-0107&amp;rft.aulast=Kamila&amp;rft.aufirst=Manoj+Kumar&amp;rft.au=Jasrotia%2C+Sahil+Singh&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1108%2FIJOES-05-2023-0107&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-WiredMS-66"><span class="mw-cite-backlink"><b><a href="#cite_ref-WiredMS_66-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="WiredMS" class="citation magazine cs1">Thurm S (July 13, 2018). <a rel="nofollow" class="external text" href="https://www.wired.com/story/microsoft-calls-for-federal-regulation-of-facial-recognition/">"Microsoft Calls For Federal Regulation of Facial Recognition"</a>. <i>Wired</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190509231338/https://www.wired.com/story/microsoft-calls-for-federal-regulation-of-facial-recognition/">Archived</a> from the original on May 9, 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">January 10,</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Wired&amp;rft.atitle=Microsoft+Calls+For+Federal+Regulation+of+Facial+Recognition&amp;rft.date=2018-07-13&amp;rft.aulast=Thurm&amp;rft.aufirst=Scott&amp;rft_id=https%3A%2F%2Fwww.wired.com%2Fstory%2Fmicrosoft-calls-for-federal-regulation-of-facial-recognition%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-67"><span class="mw-cite-backlink"><b><a href="#cite_ref-67">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPiper2024" class="citation web cs1">Piper K (2024-02-02). <a rel="nofollow" class="external text" href="https://www.vox.com/future-perfect/2024/2/2/24058484/open-source-artificial-intelligence-ai-risk-meta-llama-2-chatgpt-openai-deepfake">"Should we make our most powerful AI models open source to all?"</a>. <i>Vox</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-04-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Vox&amp;rft.atitle=Should+we+make+our+most+powerful+AI+models+open+source+to+all%3F&amp;rft.date=2024-02-02&amp;rft.aulast=Piper&amp;rft.aufirst=Kelsey&amp;rft_id=https%3A%2F%2Fwww.vox.com%2Ffuture-perfect%2F2024%2F2%2F2%2F24058484%2Fopen-source-artificial-intelligence-ai-risk-meta-llama-2-chatgpt-openai-deepfake&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-68"><span class="mw-cite-backlink"><b><a href="#cite_ref-68">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVincent2023" class="citation web cs1">Vincent J (2023-03-15). <a rel="nofollow" class="external text" href="https://www.theverge.com/2023/3/15/23640180/openai-gpt-4-launch-closed-research-ilya-sutskever-interview">"OpenAI co-founder on company's past approach to openly sharing research: "We were wrong"<span class="cs1-kern-right"></span>"</a>. <i>The Verge</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230317210900/https://www.theverge.com/2023/3/15/23640180/openai-gpt-4-launch-closed-research-ilya-sutskever-interview">Archived</a> from the original on 2023-03-17<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-04-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Verge&amp;rft.atitle=OpenAI+co-founder+on+company%27s+past+approach+to+openly+sharing+research%3A+%22We+were+wrong%22&amp;rft.date=2023-03-15&amp;rft.aulast=Vincent&amp;rft.aufirst=James&amp;rft_id=https%3A%2F%2Fwww.theverge.com%2F2023%2F3%2F15%2F23640180%2Fopenai-gpt-4-launch-closed-research-ilya-sutskever-interview&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-69"><span class="mw-cite-backlink"><b><a href="#cite_ref-69">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://think.kera.org/2017/12/05/inside-the-mind-of-a-i/">Inside The Mind Of A.I.</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210810003331/https://think.kera.org/2017/12/05/inside-the-mind-of-a-i/">Archived</a> 2021-08-10 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> - Cliff Kuang interview</span> </li> <li id="cite_note-70"><span class="mw-cite-backlink"><b><a href="#cite_ref-70">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBunn2020" class="citation journal cs1">Bunn J (2020-04-13). <a rel="nofollow" class="external text" href="https://www.emerald.com/insight/content/doi/10.1108/RMJ-08-2019-0038/full/html">"Working in contexts for which transparency is important: A recordkeeping view of explainable artificial intelligence (XAI)"</a>. <i>Records Management Journal</i>. <b>30</b> (2): <span class="nowrap">143–</span>153. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1108%2FRMJ-08-2019-0038">10.1108/RMJ-08-2019-0038</a>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0956-5698">0956-5698</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:219079717">219079717</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Records+Management+Journal&amp;rft.atitle=Working+in+contexts+for+which+transparency+is+important%3A+A+recordkeeping+view+of+explainable+artificial+intelligence+%28XAI%29&amp;rft.volume=30&amp;rft.issue=2&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E143-%3C%2Fspan%3E153&amp;rft.date=2020-04-13&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A219079717%23id-name%3DS2CID&amp;rft.issn=0956-5698&amp;rft_id=info%3Adoi%2F10.1108%2FRMJ-08-2019-0038&amp;rft.aulast=Bunn&amp;rft.aufirst=Jenny&amp;rft_id=https%3A%2F%2Fwww.emerald.com%2Finsight%2Fcontent%2Fdoi%2F10.1108%2FRMJ-08-2019-0038%2Ffull%2Fhtml&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-71"><span class="mw-cite-backlink"><b><a href="#cite_ref-71">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLiRuijsLu2022" class="citation journal cs1">Li F, Ruijs N, Lu Y (2022-12-31). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Fai4010003">"Ethics &amp; AI: A Systematic Review on Ethical Concerns and Related Strategies for Designing with AI in Healthcare"</a>. <i>AI</i>. <b>4</b> (1): <span class="nowrap">28–</span>53. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Fai4010003">10.3390/ai4010003</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2673-2688">2673-2688</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=AI&amp;rft.atitle=Ethics+%26+AI%3A+A+Systematic+Review+on+Ethical+Concerns+and+Related+Strategies+for+Designing+with+AI+in+Healthcare&amp;rft.volume=4&amp;rft.issue=1&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E28-%3C%2Fspan%3E53&amp;rft.date=2022-12-31&amp;rft_id=info%3Adoi%2F10.3390%2Fai4010003&amp;rft.issn=2673-2688&amp;rft.aulast=Li&amp;rft.aufirst=Fan&amp;rft.au=Ruijs%2C+Nick&amp;rft.au=Lu%2C+Yuan&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Fai4010003&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-72"><span class="mw-cite-backlink"><b><a href="#cite_ref-72">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHoward2019" class="citation web cs1">Howard A (29 July 2019). <a rel="nofollow" class="external text" href="https://sloanreview.mit.edu/article/the-regulation-of-ai-should-organizations-be-worried/">"The Regulation of AI – Should Organizations Be Worried? | Ayanna Howard"</a>. <i>MIT Sloan Management Review</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20190814134545/https://sloanreview.mit.edu/article/the-regulation-of-ai-should-organizations-be-worried/">Archived</a> from the original on 2019-08-14<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-08-14</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=MIT+Sloan+Management+Review&amp;rft.atitle=The+Regulation+of+AI+%E2%80%93+Should+Organizations+Be+Worried%3F+%7C+Ayanna+Howard&amp;rft.date=2019-07-29&amp;rft.aulast=Howard&amp;rft.aufirst=Ayanna&amp;rft_id=https%3A%2F%2Fsloanreview.mit.edu%2Farticle%2Fthe-regulation-of-ai-should-organizations-be-worried%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-73"><span class="mw-cite-backlink"><b><a href="#cite_ref-73">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://assets.kpmg.com/content/dam/kpmg/au/pdf/2021/trust-in-ai-multiple-countries.pdf">"Trust in artificial intelligence - A five country study"</a> <span class="cs1-format">(PDF)</span>. <i>KPMG</i>. March 2021. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231001161127/https://assets.kpmg.com/content/dam/kpmg/au/pdf/2021/trust-in-ai-multiple-countries.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2023-10-01<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-10-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=KPMG&amp;rft.atitle=Trust+in+artificial+intelligence+-+A+five+country+study&amp;rft.date=2021-03&amp;rft_id=https%3A%2F%2Fassets.kpmg.com%2Fcontent%2Fdam%2Fkpmg%2Fau%2Fpdf%2F2021%2Ftrust-in-ai-multiple-countries.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-DeloitteGDPR-74"><span class="mw-cite-backlink"><b><a href="#cite_ref-DeloitteGDPR_74-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="DeloitteGDPR" class="citation web cs1">Bastin R, Wantz G (June 2017). <a rel="nofollow" class="external text" href="https://www2.deloitte.com/content/dam/Deloitte/lu/Documents/technology/lu-general-data-protection-regulation-cross-industry-innovation-062017.pdf">"The General Data Protection Regulation Cross-industry innovation"</a> <span class="cs1-format">(PDF)</span>. <i>Inside magazine</i>. Deloitte. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190110183405/https://www2.deloitte.com/content/dam/Deloitte/lu/Documents/technology/lu-general-data-protection-regulation-cross-industry-innovation-062017.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2019-01-10<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-01-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Inside+magazine&amp;rft.atitle=The+General+Data+Protection+Regulation+Cross-industry+innovation&amp;rft.date=2017-06&amp;rft.aulast=Bastin&amp;rft.aufirst=Roland&amp;rft.au=Wantz%2C+Georges&amp;rft_id=https%3A%2F%2Fwww2.deloitte.com%2Fcontent%2Fdam%2FDeloitte%2Flu%2FDocuments%2Ftechnology%2Flu-general-data-protection-regulation-cross-industry-innovation-062017.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-75"><span class="mw-cite-backlink"><b><a href="#cite_ref-75">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://news.un.org/en/story/2017/06/558962-un-artificial-intelligence-summit-aims-tackle-poverty-humanitys-grand">"UN artificial intelligence summit aims to tackle poverty, humanity's 'grand challenges'<span class="cs1-kern-right"></span>"</a>. <i>UN News</i>. 2017-06-07. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726084819/https://news.un.org/en/story/2017/06/558962-un-artificial-intelligence-summit-aims-tackle-poverty-humanitys-grand">Archived</a> from the original on 2019-07-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=UN+News&amp;rft.atitle=UN+artificial+intelligence+summit+aims+to+tackle+poverty%2C+humanity%27s+%27grand+challenges%27&amp;rft.date=2017-06-07&amp;rft_id=https%3A%2F%2Fnews.un.org%2Fen%2Fstory%2F2017%2F06%2F558962-un-artificial-intelligence-summit-aims-tackle-poverty-humanitys-grand&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-76"><span class="mw-cite-backlink"><b><a href="#cite_ref-76">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.oecd.org/going-digital/ai/">"Artificial intelligence – Organisation for Economic Co-operation and Development"</a>. <i>www.oecd.org</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190722124751/http://www.oecd.org/going-digital/ai/">Archived</a> from the original on 2019-07-22<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.oecd.org&amp;rft.atitle=Artificial+intelligence+%E2%80%93+Organisation+for+Economic+Co-operation+and+Development&amp;rft_id=http%3A%2F%2Fwww.oecd.org%2Fgoing-digital%2Fai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-77"><span class="mw-cite-backlink"><b><a href="#cite_ref-77">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAnonymous2018" class="citation web cs1">Anonymous (2018-06-14). 
<a rel="nofollow" class="external text" href="https://ec.europa.eu/digital-single-market/en/european-ai-alliance">"The European AI Alliance"</a>. <i>Digital Single Market – European Commission</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190801011543/https://ec.europa.eu/digital-single-market/en/european-ai-alliance">Archived</a> from the original on 2019-08-01<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Digital+Single+Market+%E2%80%93+European+Commission&amp;rft.atitle=The+European+AI+Alliance&amp;rft.date=2018-06-14&amp;rft.au=Anonymous&amp;rft_id=https%3A%2F%2Fec.europa.eu%2Fdigital-single-market%2Fen%2Feuropean-ai-alliance&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-78"><span class="mw-cite-backlink"><b><a href="#cite_ref-78">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEuropean_Commission_High-Level_Expert_Group_on_AI2019" class="citation web cs1">European Commission High-Level Expert Group on AI (2019-06-26). <a rel="nofollow" class="external text" href="https://ec.europa.eu/digital-single-market/en/news/policy-and-investment-recommendations-trustworthy-artificial-intelligence">"Policy and investment recommendations for trustworthy Artificial Intelligence"</a>. <i>Shaping Europe’s digital future – European Commission</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200226023934/https://ec.europa.eu/digital-single-market/en/news/policy-and-investment-recommendations-trustworthy-artificial-intelligence">Archived</a> from the original on 2020-02-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-03-16</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Shaping+Europe%E2%80%99s+digital+future+%E2%80%93+European+Commission&amp;rft.atitle=Policy+and+investment+recommendations+for+trustworthy+Artificial+Intelligence&amp;rft.date=2019-06-26&amp;rft.au=European+Commission+High-Level+Expert+Group+on+AI&amp;rft_id=https%3A%2F%2Fec.europa.eu%2Fdigital-single-market%2Fen%2Fnews%2Fpolicy-and-investment-recommendations-trustworthy-artificial-intelligence&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-79"><span class="mw-cite-backlink"><b><a href="#cite_ref-79">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFukuda-ParrGibbons2021" class="citation journal cs1">Fukuda-Parr S, Gibbons E (July 2021). <a rel="nofollow" class="external text" href="https://doi.org/10.1111%2F1758-5899.12965">"Emerging Consensus on 'Ethical AI': Human Rights Critique of Stakeholder Guidelines"</a>. <i>Global Policy</i>. <b>12</b> (S6): <span class="nowrap">32–</span>44. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1111%2F1758-5899.12965">10.1111/1758-5899.12965</a></span>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1758-5880">1758-5880</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Global+Policy&amp;rft.atitle=Emerging+Consensus+on+%27Ethical+AI%27%3A+Human+Rights+Critique+of+Stakeholder+Guidelines&amp;rft.volume=12&amp;rft.issue=S6&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E32-%3C%2Fspan%3E44&amp;rft.date=2021-07&amp;rft_id=info%3Adoi%2F10.1111%2F1758-5899.12965&amp;rft.issn=1758-5880&amp;rft.aulast=Fukuda-Parr&amp;rft.aufirst=Sakiko&amp;rft.au=Gibbons%2C+Elizabeth&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1111%252F1758-5899.12965&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-80"><span class="mw-cite-backlink"><b><a href="#cite_ref-80">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://cdt.org/blog/eu-tech-policy-brief-july-2019-recap/">"EU Tech Policy Brief: July 2019 Recap"</a>. <i>Center for Democracy &amp; Technology</i>. 2 August 2019. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190809194057/https://cdt.org/blog/eu-tech-policy-brief-july-2019-recap/">Archived</a> from the original on 2019-08-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-08-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Center+for+Democracy+%26+Technology&amp;rft.atitle=EU+Tech+Policy+Brief%3A+July+2019+Recap&amp;rft.date=2019-08-02&amp;rft_id=https%3A%2F%2Fcdt.org%2Fblog%2Feu-tech-policy-brief-july-2019-recap%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-81"><span class="mw-cite-backlink"><b><a href="#cite_ref-81">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCurtisGillespieLockey2022" class="citation journal cs1">Curtis C, Gillespie N, Lockey S (2022-05-24). <a rel="nofollow" class="external text" href="https://doi.org/10.1007/s43681-022-00163-7">"AI-deploying organizations are key to addressing 'perfect storm' of AI risks"</a>. <i>AI and Ethics</i>. <b>3</b> (1): <span class="nowrap">145–</span>153. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs43681-022-00163-7">10.1007/s43681-022-00163-7</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2730-5961">2730-5961</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9127285">9127285</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/35634256">35634256</a>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20230315194711/https://link.springer.com/article/10.1007/s43681-022-00163-7">Archived</a> from the original on 2023-03-15<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-05-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=AI+and+Ethics&amp;rft.atitle=AI-deploying+organizations+are+key+to+addressing+%27perfect+storm%27+of+AI+risks&amp;rft.volume=3&amp;rft.issue=1&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E145-%3C%2Fspan%3E153&amp;rft.date=2022-05-24&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC9127285%23id-name%3DPMC&amp;rft.issn=2730-5961&amp;rft_id=info%3Apmid%2F35634256&amp;rft_id=info%3Adoi%2F10.1007%2Fs43681-022-00163-7&amp;rft.aulast=Curtis&amp;rft.aufirst=Caitlin&amp;rft.au=Gillespie%2C+Nicole&amp;rft.au=Lockey%2C+Steven&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1007%2Fs43681-022-00163-7&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Financial_Times-2021-82"><span class="mw-cite-backlink">^ <a href="#cite_ref-Financial_Times-2021_82-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Financial_Times-2021_82-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.ft.com/content/17ca620c-4d76-4a2f-829a-27d8552ce719">"Why the world needs a Bill of Rights on AI"</a>. <i>Financial Times</i>. 2021-10-18<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-03-19</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Financial+Times&amp;rft.atitle=Why+the+world+needs+a+Bill+of+Rights+on+AI&amp;rft.date=2021-10-18&amp;rft_id=https%3A%2F%2Fwww.ft.com%2Fcontent%2F17ca620c-4d76-4a2f-829a-27d8552ce719&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-83"><span class="mw-cite-backlink"><b><a href="#cite_ref-83">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChallenDennyPittGompels2019" class="citation journal cs1">Challen R, Denny J, Pitt M, Gompels L, Edwards T, Tsaneva-Atanasova K (March 2019). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6560460">"Artificial intelligence, bias and clinical safety"</a>. <i>BMJ Quality &amp; Safety</i>. <b>28</b> (3): <span class="nowrap">231–</span>237. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1136%2Fbmjqs-2018-008370">10.1136/bmjqs-2018-008370</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2044-5415">2044-5415</a>. 
<a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6560460">6560460</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/30636200">30636200</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BMJ+Quality+%26+Safety&amp;rft.atitle=Artificial+intelligence%2C+bias+and+clinical+safety&amp;rft.volume=28&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E231-%3C%2Fspan%3E237&amp;rft.date=2019-03&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6560460%23id-name%3DPMC&amp;rft.issn=2044-5415&amp;rft_id=info%3Apmid%2F30636200&amp;rft_id=info%3Adoi%2F10.1136%2Fbmjqs-2018-008370&amp;rft.aulast=Challen&amp;rft.aufirst=Robert&amp;rft.au=Denny%2C+Joshua&amp;rft.au=Pitt%2C+Martin&amp;rft.au=Gompels%2C+Luke&amp;rft.au=Edwards%2C+Tom&amp;rft.au=Tsaneva-Atanasova%2C+Krasimira&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6560460&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-84"><span class="mw-cite-backlink"><b><a href="#cite_ref-84">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEvans2015" class="citation journal cs1"><a href="/wiki/Woody_Evans" title="Woody Evans">Evans W</a> (2015). <a rel="nofollow" class="external text" href="https://doi.org/10.5209%2Frev_TK.2015.v12.n2.49072">"Posthuman Rights: Dimensions of Transhuman Worlds"</a>. <i>Teknokultura</i>. <b>12</b> (2). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.5209%2Frev_TK.2015.v12.n2.49072">10.5209/rev_TK.2015.v12.n2.49072</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Teknokultura&amp;rft.atitle=Posthuman+Rights%3A+Dimensions+of+Transhuman+Worlds&amp;rft.volume=12&amp;rft.issue=2&amp;rft.date=2015&amp;rft_id=info%3Adoi%2F10.5209%2Frev_TK.2015.v12.n2.49072&amp;rft.aulast=Evans&amp;rft.aufirst=Woody&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.5209%252Frev_TK.2015.v12.n2.49072&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-85"><span class="mw-cite-backlink"><b><a href="#cite_ref-85">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSheliazhenko2017" class="citation journal cs1"><a href="/w/index.php?title=Yurii_Sheliazhenko&amp;action=edit&amp;redlink=1" class="new" title="Yurii Sheliazhenko (page does not exist)">Sheliazhenko Y</a> (2017). <a rel="nofollow" class="external text" href="http://cyberleninka.ru/article/n/artificial-personal-autonomy-and-concept-of-robot-rights">"Artificial Personal Autonomy and Concept of Robot Rights"</a>. <i>European Journal of Law and Political Sciences</i>: <span class="nowrap">17–</span>21. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.20534%2FEJLPS-17-1-17-21">10.20534/EJLPS-17-1-17-21</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180714111141/https://cyberleninka.ru/article/n/artificial-personal-autonomy-and-concept-of-robot-rights">Archived</a> from the original on 14 July 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">10 May</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=European+Journal+of+Law+and+Political+Sciences&amp;rft.atitle=Artificial+Personal+Autonomy+and+Concept+of+Robot+Rights&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E17-%3C%2Fspan%3E21&amp;rft.date=2017&amp;rft_id=info%3Adoi%2F10.20534%2FEJLPS-17-1-17-21&amp;rft.aulast=Sheliazhenko&amp;rft.aufirst=Yurii&amp;rft_id=http%3A%2F%2Fcyberleninka.ru%2Farticle%2Fn%2Fartificial-personal-autonomy-and-concept-of-robot-rights&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-86"><span class="mw-cite-backlink"><b><a href="#cite_ref-86">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDoomen2023" class="citation journal cs1">Doomen J (2023). <a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F13600834.2023.2196827">"The artificial intelligence entity as a legal person"</a>. <i>Information &amp; Communications Technology Law</i>. <b>32</b> (3): <span class="nowrap">277–</span>278. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F13600834.2023.2196827">10.1080/13600834.2023.2196827</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/1820%2Fc29a3daa-9e36-4640-85d3-d0ffdd18a62c">1820/c29a3daa-9e36-4640-85d3-d0ffdd18a62c</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Information+%26+Communications+Technology+Law&amp;rft.atitle=The+artificial+intelligence+entity+as+a+legal+person&amp;rft.volume=32&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E277-%3C%2Fspan%3E278&amp;rft.date=2023&amp;rft_id=info%3Ahdl%2F1820%2Fc29a3daa-9e36-4640-85d3-d0ffdd18a62c&amp;rft_id=info%3Adoi%2F10.1080%2F13600834.2023.2196827&amp;rft.aulast=Doomen&amp;rft.aufirst=Jasper&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1080%252F13600834.2023.2196827&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-87"><span class="mw-cite-backlink"><b><a href="#cite_ref-87">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="http://news.bbc.co.uk/2/hi/technology/6200005.stm">"Robots could demand legal rights"</a>. <i>BBC News</i>. December 21, 2006. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20191015042628/http://news.bbc.co.uk/2/hi/technology/6200005.stm">Archived</a> from the original on October 15, 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">January 3,</span> 2010</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BBC+News&amp;rft.atitle=Robots+could+demand+legal+rights&amp;rft.date=2006-12-21&amp;rft_id=http%3A%2F%2Fnews.bbc.co.uk%2F2%2Fhi%2Ftechnology%2F6200005.stm&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-TimesOnline-88"><span class="mw-cite-backlink"><b><a href="#cite_ref-TimesOnline_88-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHenderson2007" class="citation news cs1">Henderson M (April 24, 2007). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20080517022444/http://www.timesonline.co.uk/tol/news/uk/science/article1695546.ece">"Human rights for robots? We're getting carried away"</a>. <i>The Times Online</i>. The Times of London. Archived from <a rel="nofollow" class="external text" href="http://www.timesonline.co.uk/tol/news/uk/science/article1695546.ece">the original</a> on May 17, 2008<span class="reference-accessdate">. Retrieved <span class="nowrap">May 2,</span> 2010</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Times+Online&amp;rft.atitle=Human+rights+for+robots%3F+We%27re+getting+carried+away&amp;rft.date=2007-04-24&amp;rft.aulast=Henderson&amp;rft.aufirst=Mark&amp;rft_id=http%3A%2F%2Fwww.timesonline.co.uk%2Ftol%2Fnews%2Fuk%2Fscience%2Farticle1695546.ece&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-89"><span class="mw-cite-backlink"><b><a href="#cite_ref-89">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://techcrunch.com/2017/10/26/saudi-arabia-robot-citizen-sophia/">"Saudi Arabia bestows citizenship on a robot named Sophia"</a>. 26 October 2017. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171027023101/https://techcrunch.com/2017/10/26/saudi-arabia-robot-citizen-sophia/">Archived</a> from the original on 2017-10-27<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-10-27</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Saudi+Arabia+bestows+citizenship+on+a+robot+named+Sophia&amp;rft.date=2017-10-26&amp;rft_id=https%3A%2F%2Ftechcrunch.com%2F2017%2F10%2F26%2Fsaudi-arabia-robot-citizen-sophia%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-90"><span class="mw-cite-backlink"><b><a href="#cite_ref-90">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="bs" class="citation web cs1">Vincent J (30 October 2017). 
<a rel="nofollow" class="external text" href="https://www.theverge.com/2017/10/30/16552006/robot-rights-citizenship-saudi-arabia-sophia">"Pretending to give a robot citizenship helps no one"</a>. <i>The Verge</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190803144659/https://www.theverge.com/2017/10/30/16552006/robot-rights-citizenship-saudi-arabia-sophia">Archived</a> from the original on 3 August 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">10 January</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Verge&amp;rft.atitle=Pretending+to+give+a+robot+citizenship+helps+no+one&amp;rft.date=2017-10-30&amp;rft.aulast=Vincent&amp;rft.aufirst=James&amp;rft_id=https%3A%2F%2Fwww.theverge.com%2F2017%2F10%2F30%2F16552006%2Frobot-rights-citizenship-saudi-arabia-sophia&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-91"><span class="mw-cite-backlink"><b><a href="#cite_ref-91">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWilks,_Yorick2010" class="citation book cs1">Wilks, Yorick, ed. (2010). <i>Close engagements with artificial companions: key social, psychological, ethical and design issues</i>. Amsterdam: John Benjamins Pub. Co. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-90-272-4994-4" title="Special:BookSources/978-90-272-4994-4"><bdi>978-90-272-4994-4</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/642206106">642206106</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Close+engagements+with+artificial+companions%3A+key+social%2C+psychological%2C+ethical+and+design+issues&amp;rft.place=Amsterdam&amp;rft.pub=John+Benjamins+Pub.+Co&amp;rft.date=2010&amp;rft_id=info%3Aoclcnum%2F642206106&amp;rft.isbn=978-90-272-4994-4&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-92"><span class="mw-cite-backlink"><b><a href="#cite_ref-92">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMacrae2022" class="citation journal cs1">Macrae C (September 2022). <a rel="nofollow" class="external text" href="https://onlinelibrary.wiley.com/doi/10.1111/risa.13850">"Learning from the Failure of Autonomous and Intelligent Systems: Accidents, Safety, and Sociotechnical Sources of Risk"</a>. <i>Risk Analysis</i>. <b>42</b> (9): <span class="nowrap">1999–</span>2025. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2022RiskA..42.1999M">2022RiskA..42.1999M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1111%2Frisa.13850">10.1111/risa.13850</a>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0272-4332">0272-4332</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/34814229">34814229</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Risk+Analysis&amp;rft.atitle=Learning+from+the+Failure+of+Autonomous+and+Intelligent+Systems%3A+Accidents%2C+Safety%2C+and+Sociotechnical+Sources+of+Risk&amp;rft.volume=42&amp;rft.issue=9&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E1999-%3C%2Fspan%3E2025&amp;rft.date=2022-09&amp;rft_id=info%3Adoi%2F10.1111%2Frisa.13850&amp;rft.issn=0272-4332&amp;rft_id=info%3Apmid%2F34814229&amp;rft_id=info%3Abibcode%2F2022RiskA..42.1999M&amp;rft.aulast=Macrae&amp;rft.aufirst=Carl&amp;rft_id=https%3A%2F%2Fonlinelibrary.wiley.com%2Fdoi%2F10.1111%2Frisa.13850&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-93"><span class="mw-cite-backlink"><b><a href="#cite_ref-93">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAgarwalEdelman2020" class="citation journal cs1">Agarwal A, Edelman S (2020). "Functionally effective conscious AI without suffering". <i><a href="/w/index.php?title=Journal_of_Artificial_Intelligence_and_Consciousness&amp;action=edit&amp;redlink=1" class="new" title="Journal of Artificial Intelligence and Consciousness (page does not exist)">Journal of Artificial Intelligence and Consciousness</a></i>. <b>7</b>: <span class="nowrap">39–</span>50. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2002.05652">2002.05652</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1142%2FS2705078520300030">10.1142/S2705078520300030</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:211096533">211096533</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Artificial+Intelligence+and+Consciousness&amp;rft.atitle=Functionally+effective+conscious+AI+without+suffering&amp;rft.volume=7&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E39-%3C%2Fspan%3E50&amp;rft.date=2020&amp;rft_id=info%3Aarxiv%2F2002.05652&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A211096533%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1142%2FS2705078520300030&amp;rft.aulast=Agarwal&amp;rft.aufirst=A&amp;rft.au=Edelman%2C+S&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-94"><span class="mw-cite-backlink"><b><a href="#cite_ref-94">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFThomas_Metzinger2021" class="citation journal cs1"><a href="/wiki/Thomas_Metzinger" title="Thomas Metzinger">Thomas Metzinger</a> (February 2021). <a rel="nofollow" class="external text" href="https://doi.org/10.1142%2FS270507852150003X">"Artificial Suffering: An Argument for a Global Moratorim on Synthetic Phenomenology"</a>. <i>Journal of Artificial Intelligence and Consciousness</i>. <b>8</b>: <span class="nowrap">43–</span>66. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1142%2FS270507852150003X">10.1142/S270507852150003X</a></span>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:233176465">233176465</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Artificial+Intelligence+and+Consciousness&amp;rft.atitle=Artificial+Suffering%3A+An+Argument+for+a+Global+Moratorim+on+Synthetic+Phenomenology&amp;rft.volume=8&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E43-%3C%2Fspan%3E66&amp;rft.date=2021-02&amp;rft_id=info%3Adoi%2F10.1142%2FS270507852150003X&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A233176465%23id-name%3DS2CID&amp;rft.au=Thomas+Metzinger&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1142%252FS270507852150003X&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-95"><span class="mw-cite-backlink"><b><a href="#cite_ref-95">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChalmers2023" class="citation arxiv cs1"><a href="/wiki/David_Chalmers" title="David Chalmers">Chalmers D</a> (March 2023). "Could a Large Language Model be Conscious?". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2303.07103v1">2303.07103v1</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/Computer">Science Computer Science</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Could+a+Large+Language+Model+be+Conscious%3F&amp;rft.date=2023-03&amp;rft_id=info%3Aarxiv%2F2303.07103v1&amp;rft.aulast=Chalmers&amp;rft.aufirst=David&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-96"><span class="mw-cite-backlink"><b><a href="#cite_ref-96">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBirch2017" class="citation journal cs1"><a href="/wiki/Jonathan_Birch_(philosopher)" title="Jonathan Birch (philosopher)">Birch J</a> (2017-01-01). <a rel="nofollow" class="external text" href="https://www.wellbeingintlstudiesrepository.org/animsent/vol2/iss16/1">"Animal sentience and the precautionary principle"</a>. <i>Animal Sentience</i>. <b>2</b> (16). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.51291%2F2377-7478.1200">10.51291/2377-7478.1200</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2377-7478">2377-7478</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240811145748/https://www.wellbeingintlstudiesrepository.org/animsent/vol2/iss16/1/">Archived</a> from the original on 2024-08-11<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-07-08</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Animal+Sentience&amp;rft.atitle=Animal+sentience+and+the+precautionary+principle&amp;rft.volume=2&amp;rft.issue=16&amp;rft.date=2017-01-01&amp;rft_id=info%3Adoi%2F10.51291%2F2377-7478.1200&amp;rft.issn=2377-7478&amp;rft.aulast=Birch&amp;rft.aufirst=Jonathan&amp;rft_id=https%3A%2F%2Fwww.wellbeingintlstudiesrepository.org%2Fanimsent%2Fvol2%2Fiss16%2F1&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-97"><span class="mw-cite-backlink"><b><a href="#cite_ref-97">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFShulmanBostrom2021" class="citation journal cs1">Shulman C, Bostrom N (August 2021). <a rel="nofollow" class="external text" href="https://nickbostrom.com/papers/digital-minds.pdf">"Sharing the World with Digital Minds"</a> <span class="cs1-format">(PDF)</span>. 
<i>Rethinking Moral Status</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Rethinking+Moral+Status&amp;rft.atitle=Sharing+the+World+with+Digital+Minds&amp;rft.date=2021-08&amp;rft.aulast=Shulman&amp;rft.aufirst=Carl&amp;rft.au=Bostrom%2C+Nick&amp;rft_id=https%3A%2F%2Fnickbostrom.com%2Fpapers%2Fdigital-minds.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-98"><span class="mw-cite-backlink"><b><a href="#cite_ref-98">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFisher2020" class="citation news cs1">Fisher R (13 November 2020). <a rel="nofollow" class="external text" href="https://www.bbc.com/future/article/20201111-philosophy-of-utility-monsters-and-artificial-intelligence">"The intelligent monster that you should let eat you"</a>. BBC News<span class="reference-accessdate">. Retrieved <span class="nowrap">12 February</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.atitle=The+intelligent+monster+that+you+should+let+eat+you&amp;rft.date=2020-11-13&amp;rft.aulast=Fisher&amp;rft.aufirst=Richard&amp;rft_id=https%3A%2F%2Fwww.bbc.com%2Ffuture%2Farticle%2F20201111-philosophy-of-utility-monsters-and-artificial-intelligence&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Weizenbaum&#39;s_critique-99"><span class="mw-cite-backlink">^ <a href="#cite_ref-Weizenbaum&#39;s_critique_99-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Weizenbaum&#39;s_critique_99-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"> <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><a href="/wiki/Joseph_Weizenbaum" title="Joseph Weizenbaum">Weizenbaum J</a> (1976). <a href="/wiki/Computer_Power_and_Human_Reason" title="Computer Power and Human Reason"><i>Computer Power and Human Reason</i></a>. San Francisco: W.H. Freeman &amp; Company. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-7167-0464-5" title="Special:BookSources/978-0-7167-0464-5"><bdi>978-0-7167-0464-5</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Computer+Power+and+Human+Reason&amp;rft.place=San+Francisco&amp;rft.pub=W.H.+Freeman+%26+Company&amp;rft.date=1976&amp;rft.isbn=978-0-7167-0464-5&amp;rft.aulast=Weizenbaum&amp;rft.aufirst=Joseph&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMcCorduck2004" class="citation cs2"><a href="/wiki/Pamela_McCorduck" title="Pamela McCorduck">McCorduck P</a> (2004), <i>Machines Who Think</i> (2nd&#160;ed.), Natick, Massachusetts: A. K. 
Peters, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/1-5688-1205-1" title="Special:BookSources/1-5688-1205-1"><bdi>1-5688-1205-1</bdi></a></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Machines+Who+Think&amp;rft.place=Natick%2C+Massachusetts&amp;rft.edition=2nd&amp;rft.pub=A.+K.+Peters&amp;rft.date=2004&amp;rft.isbn=1-5688-1205-1&amp;rft.aulast=McCorduck&amp;rft.aufirst=Pamela&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span>, pp. 132–144 </li></ul> </span></li> <li id="cite_note-MWZ-100"><span class="mw-cite-backlink">^ <a href="#cite_ref-MWZ_100-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-MWZ_100-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><a href="/wiki/Joseph_Weizenbaum" title="Joseph Weizenbaum">Joseph Weizenbaum</a>, quoted in <a href="#CITEREFMcCorduck2004">McCorduck 2004</a>, pp.&#160;356, 374–376</span> </li> <li id="cite_note-101"><span class="mw-cite-backlink"><b><a href="#cite_ref-101">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKaplanHaenlein2019" class="citation journal cs1">Kaplan A, Haenlein M (January 2019). "Siri, Siri, in my hand: Who's the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". <i>Business Horizons</i>. <b>62</b> (1): <span class="nowrap">15–</span>25. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.bushor.2018.08.004">10.1016/j.bushor.2018.08.004</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:158433736">158433736</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Business+Horizons&amp;rft.atitle=Siri%2C+Siri%2C+in+my+hand%3A+Who%27s+the+fairest+in+the+land%3F+On+the+interpretations%2C+illustrations%2C+and+implications+of+artificial+intelligence&amp;rft.volume=62&amp;rft.issue=1&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E15-%3C%2Fspan%3E25&amp;rft.date=2019-01&amp;rft_id=info%3Adoi%2F10.1016%2Fj.bushor.2018.08.004&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A158433736%23id-name%3DS2CID&amp;rft.aulast=Kaplan&amp;rft.aufirst=Andreas&amp;rft.au=Haenlein%2C+Michael&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-hibbard_2014-102"><span class="mw-cite-backlink">^ <a href="#cite_ref-hibbard_2014_102-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-hibbard_2014_102-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHibbard2015" class="citation arxiv cs1">Hibbard B (17 November 2015). "Ethical Artificial Intelligence". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1411.1373">1411.1373</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.AI">cs.AI</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Ethical+Artificial+Intelligence&amp;rft.date=2015-11-17&amp;rft_id=info%3Aarxiv%2F1411.1373&amp;rft.aulast=Hibbard&amp;rft.aufirst=Bill&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-103"><span class="mw-cite-backlink"><b><a href="#cite_ref-103">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDavies2016" class="citation news cs1">Davies A (29 February 2016). <a rel="nofollow" class="external text" href="https://www.wired.com/2016/02/googles-self-driving-car-may-caused-first-crash/">"Google's Self-Driving Car Caused Its First Crash"</a>. <i>Wired</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190707212719/https://www.wired.com/2016/02/googles-self-driving-car-may-caused-first-crash/">Archived</a> from the original on 7 July 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">26 July</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Wired&amp;rft.atitle=Google%27s+Self-Driving+Car+Caused+Its+First+Crash&amp;rft.date=2016-02-29&amp;rft.aulast=Davies&amp;rft.aufirst=Alex&amp;rft_id=https%3A%2F%2Fwww.wired.com%2F2016%2F02%2Fgoogles-self-driving-car-may-caused-first-crash%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-104"><span class="mw-cite-backlink"><b><a href="#cite_ref-104">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLevinWong2018" class="citation news cs1"><a href="/wiki/Julia_Carrie_Wong" title="Julia Carrie Wong">Levin S</a>, Wong JC (19 March 2018). <a rel="nofollow" class="external text" href="https://www.theguardian.com/technology/2018/mar/19/uber-self-driving-car-kills-woman-arizona-tempe">"Self-driving Uber kills Arizona woman in first fatal crash involving pedestrian"</a>. <i>The Guardian</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726084818/https://www.theguardian.com/technology/2018/mar/19/uber-self-driving-car-kills-woman-arizona-tempe">Archived</a> from the original on 26 July 2019<span class="reference-accessdate">. 
Retrieved <span class="nowrap">26 July</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Guardian&amp;rft.atitle=Self-driving+Uber+kills+Arizona+woman+in+first+fatal+crash+involving+pedestrian&amp;rft.date=2018-03-19&amp;rft.aulast=Levin&amp;rft.aufirst=Sam&amp;rft.au=Wong%2C+Julia+Carrie&amp;rft_id=https%3A%2F%2Fwww.theguardian.com%2Ftechnology%2F2018%2Fmar%2F19%2Fuber-self-driving-car-kills-woman-arizona-tempe&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-105"><span class="mw-cite-backlink"><b><a href="#cite_ref-105">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://futurism.com/who-responsible-when-self-driving-car-accident">"Who is responsible when a self-driving car has an accident?"</a>. <i>Futurism</i>. 30 January 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726084819/https://futurism.com/who-responsible-when-self-driving-car-accident">Archived</a> from the original on 2019-07-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Futurism&amp;rft.atitle=Who+is+responsible+when+a+self-driving+car+has+an+accident%3F&amp;rft.date=2018-01-30&amp;rft_id=https%3A%2F%2Ffuturism.com%2Fwho-responsible-when-self-driving-car-accident&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-106"><span class="mw-cite-backlink"><b><a href="#cite_ref-106">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://knowledge.wharton.upenn.edu/article/automated-car-accidents/">"Autonomous Car Crashes: Who – or What – Is to Blame?"</a>. <i>Knowledge@Wharton</i>. Law and Public Policy. Radio Business North America Podcasts. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726084820/https://knowledge.wharton.upenn.edu/article/automated-car-accidents/">Archived</a> from the original on 2019-07-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Knowledge%40Wharton&amp;rft.atitle=Autonomous+Car+Crashes%3A+Who+%E2%80%93+or+What+%E2%80%93+Is+to+Blame%3F&amp;rft_id=https%3A%2F%2Fknowledge.wharton.upenn.edu%2Farticle%2Fautomated-car-accidents%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-107"><span class="mw-cite-backlink"><b><a href="#cite_ref-107">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDelbridge" class="citation web cs1">Delbridge E. <a rel="nofollow" class="external text" href="https://www.thebalance.com/driverless-car-accidents-4171792">"Driverless Cars Gone Wild"</a>. <i>The Balance</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20190529020717/https://www.thebalance.com/driverless-car-accidents-4171792">Archived</a> from the original on 2019-05-29<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-05-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Balance&amp;rft.atitle=Driverless+Cars+Gone+Wild&amp;rft.aulast=Delbridge&amp;rft.aufirst=Emily&amp;rft_id=https%3A%2F%2Fwww.thebalance.com%2Fdriverless-car-accidents-4171792&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-108"><span class="mw-cite-backlink"><b><a href="#cite_ref-108">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFStilgoe2020" class="citation cs2">Stilgoe J (2020), <a rel="nofollow" class="external text" href="http://link.springer.com/10.1007/978-3-030-32320-2_1">"Who Killed Elaine Herzberg?"</a>, <i>Who’s Driving Innovation?</i>, Cham: Springer International Publishing, pp.&#160;<span class="nowrap">1–</span>6, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-030-32320-2_1">10.1007/978-3-030-32320-2_1</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-3-030-32319-6" title="Special:BookSources/978-3-030-32319-6"><bdi>978-3-030-32319-6</bdi></a>, <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:214359377">214359377</a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210318060722/https://link.springer.com/chapter/10.1007%2F978-3-030-32320-2_1">archived</a> from the original on 2021-03-18<span class="reference-accessdate">, retrieved <span class="nowrap">2020-11-11</span></span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Who%E2%80%99s+Driving+Innovation%3F&amp;rft.atitle=Who+Killed+Elaine+Herzberg%3F&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E1-%3C%2Fspan%3E6&amp;rft.date=2020&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A214359377%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1007%2F978-3-030-32320-2_1&amp;rft.isbn=978-3-030-32319-6&amp;rft.aulast=Stilgoe&amp;rft.aufirst=Jack&amp;rft_id=http%3A%2F%2Flink.springer.com%2F10.1007%2F978-3-030-32320-2_1&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-109"><span class="mw-cite-backlink"><b><a href="#cite_ref-109">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMaxmen2018" class="citation journal cs1">Maxmen A (October 2018). <a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-018-07135-0">"Self-driving car dilemmas reveal that moral choices are not universal"</a>. <i>Nature</i>. <b>562</b> (7728): <span class="nowrap">469–</span>470. 
<a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2018Natur.562..469M">2018Natur.562..469M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-018-07135-0">10.1038/d41586-018-07135-0</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/30356197">30356197</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=Self-driving+car+dilemmas+reveal+that+moral+choices+are+not+universal&amp;rft.volume=562&amp;rft.issue=7728&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E469-%3C%2Fspan%3E470&amp;rft.date=2018-10&amp;rft_id=info%3Apmid%2F30356197&amp;rft_id=info%3Adoi%2F10.1038%2Fd41586-018-07135-0&amp;rft_id=info%3Abibcode%2F2018Natur.562..469M&amp;rft.aulast=Maxmen&amp;rft.aufirst=Amy&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252Fd41586-018-07135-0&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-110"><span class="mw-cite-backlink"><b><a href="#cite_ref-110">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.gov.uk/government/publications/driverless-cars-in-the-uk-a-regulatory-review">"Regulations for driverless cars"</a>. <i>GOV.UK</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726084816/https://www.gov.uk/government/publications/driverless-cars-in-the-uk-a-regulatory-review">Archived</a> from the original on 2019-07-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=GOV.UK&amp;rft.atitle=Regulations+for+driverless+cars&amp;rft_id=https%3A%2F%2Fwww.gov.uk%2Fgovernment%2Fpublications%2Fdriverless-cars-in-the-uk-a-regulatory-review&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-111"><span class="mw-cite-backlink"><b><a href="#cite_ref-111">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726084828/https://cyberlaw.stanford.edu/wiki/index.php/Automated_Driving:_Legislative_and_Regulatory_Action">"Automated Driving: Legislative and Regulatory Action – CyberWiki"</a>. <i>cyberlaw.stanford.edu</i>. Archived from <a rel="nofollow" class="external text" href="https://cyberlaw.stanford.edu/wiki/index.php/Automated_Driving:_Legislative_and_Regulatory_Action">the original</a> on 2019-07-26<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=cyberlaw.stanford.edu&amp;rft.atitle=Automated+Driving%3A+Legislative+and+Regulatory+Action+%E2%80%93+CyberWiki&amp;rft_id=https%3A%2F%2Fcyberlaw.stanford.edu%2Fwiki%2Findex.php%2FAutomated_Driving%3A_Legislative_and_Regulatory_Action&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-112"><span class="mw-cite-backlink"><b><a href="#cite_ref-112">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.ncsl.org/research/transportation/autonomous-vehicles-self-driving-vehicles-enacted-legislation.aspx">"Autonomous Vehicles | Self-Driving Vehicles Enacted Legislation"</a>. <i>www.ncsl.org</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726165225/http://www.ncsl.org/research/transportation/autonomous-vehicles-self-driving-vehicles-enacted-legislation.aspx">Archived</a> from the original on 2019-07-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.ncsl.org&amp;rft.atitle=Autonomous+Vehicles+%7C+Self-Driving+Vehicles+Enacted+Legislation&amp;rft_id=http%3A%2F%2Fwww.ncsl.org%2Fresearch%2Ftransportation%2Fautonomous-vehicles-self-driving-vehicles-enacted-legislation.aspx&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-113"><span class="mw-cite-backlink"><b><a href="#cite_ref-113">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEtzioniEtzioni2017" class="citation journal cs1">Etzioni A, Etzioni O (2017-12-01). <a rel="nofollow" class="external text" href="https://doi.org/10.1007/s10892-017-9252-2">"Incorporating Ethics into Artificial Intelligence"</a>. <i>The Journal of Ethics</i>. <b>21</b> (4): <span class="nowrap">403–</span>418. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10892-017-9252-2">10.1007/s10892-017-9252-2</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1572-8609">1572-8609</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:254644745">254644745</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Journal+of+Ethics&amp;rft.atitle=Incorporating+Ethics+into+Artificial+Intelligence&amp;rft.volume=21&amp;rft.issue=4&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E403-%3C%2Fspan%3E418&amp;rft.date=2017-12-01&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A254644745%23id-name%3DS2CID&amp;rft.issn=1572-8609&amp;rft_id=info%3Adoi%2F10.1007%2Fs10892-017-9252-2&amp;rft.aulast=Etzioni&amp;rft.aufirst=Amitai&amp;rft.au=Etzioni%2C+Oren&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1007%2Fs10892-017-9252-2&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-114"><span class="mw-cite-backlink"><b><a href="#cite_ref-114">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://news.bbc.co.uk/2/hi/technology/8182003.stm">Call for debate on killer robots</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20090807005005/http://news.bbc.co.uk/2/hi/technology/8182003.stm">Archived</a> 2009-08-07 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, By Jason Palmer, Science and technology reporter, BBC News, 8/3/09.</span> </li> <li id="cite_note-115"><span class="mw-cite-backlink"><b><a href="#cite_ref-115">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.dailytech.com/New%20Navyfunded%20Report%20Warns%20of%20War%20Robots%20Going%20Terminator/article14298.htm">Science New Navy-funded Report Warns of War Robots Going "Terminator"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20090728101106/http://www.dailytech.com/New%20Navyfunded%20Report%20Warns%20of%20War%20Robots%20Going%20Terminator/article14298.htm">Archived</a> 2009-07-28 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, by Jason Mick (Blog), dailytech.com, February 17, 2009.</span> </li> <li id="cite_note-engadget.com-116"><span class="mw-cite-backlink">^ <a href="#cite_ref-engadget.com_116-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-engadget.com_116-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://www.engadget.com/2009/02/18/navy-report-warns-of-robot-uprising-suggests-a-strong-moral-com/">Navy report warns of robot uprising, suggests a strong moral compass</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20110604145633/http://www.engadget.com/2009/02/18/navy-report-warns-of-robot-uprising-suggests-a-strong-moral-com/">Archived</a> 2011-06-04 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, by Joseph L. 
Flatley engadget.com, Feb 18th 2009.</span> </li> <li id="cite_note-117"><span class="mw-cite-backlink"><b><a href="#cite_ref-117">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://research.microsoft.com/en-us/um/people/horvitz/AAAI_Presidential_Panel_2008-2009.htm">AAAI Presidential Panel on Long-Term AI Futures 2008–2009 Study</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20090828214741/http://research.microsoft.com/en-us/um/people/horvitz/AAAI_Presidential_Panel_2008-2009.htm">Archived</a> 2009-08-28 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, Association for the Advancement of Artificial Intelligence, Accessed 7/26/09.</span> </li> <li id="cite_note-118"><span class="mw-cite-backlink"><b><a href="#cite_ref-118">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFUnited_States._Defense_Innovation_Board" class="citation book cs1">United States. Defense Innovation Board. <i>AI principles: recommendations on the ethical use of artificial intelligence by the Department of Defense</i>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/1126650738">1126650738</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=AI+principles%3A+recommendations+on+the+ethical+use+of+artificial+intelligence+by+the+Department+of+Defense&amp;rft_id=info%3Aoclcnum%2F1126650738&amp;rft.au=United+States.+Defense+Innovation+Board&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-119"><span class="mw-cite-backlink"><b><a href="#cite_ref-119">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.dailytech.com/New%20Navyfunded%20Report%20Warns%20of%20War%20Robots%20Going%20Terminator/article14298.htm">New Navy-funded Report Warns of War Robots Going "Terminator"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20090728101106/http://www.dailytech.com/New%20Navyfunded%20Report%20Warns%20of%20War%20Robots%20Going%20Terminator/article14298.htm">Archived</a> 2009-07-28 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, by Jason Mick (Blog), dailytech.com, February 17, 2009.</span> </li> <li id="cite_note-120"><span class="mw-cite-backlink"><b><a href="#cite_ref-120">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFUmbrelloTorresDe_Bellis2020" class="citation journal cs1">Umbrello S, Torres P, De Bellis AF (March 2020). <a rel="nofollow" class="external text" href="http://link.springer.com/10.1007/s00146-019-00879-x">"The future of war: could lethal autonomous weapons make conflict more ethical?"</a>. <i>AI &amp; Society</i>. <b>35</b> (1): <span class="nowrap">273–</span>282. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs00146-019-00879-x">10.1007/s00146-019-00879-x</a>. 
<a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<a rel="nofollow" class="external text" href="https://hdl.handle.net/2318%2F1699364">2318/1699364</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0951-5666">0951-5666</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:59606353">59606353</a>. <a rel="nofollow" class="external text" href="https://archive.today/20210105020836/https://link.springer.com/article/10.1007/s00146-019-00879-x">Archived</a> from the original on 2021-01-05<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-11-11</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=AI+%26+Society&amp;rft.atitle=The+future+of+war%3A+could+lethal+autonomous+weapons+make+conflict+more+ethical%3F&amp;rft.volume=35&amp;rft.issue=1&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E273-%3C%2Fspan%3E282&amp;rft.date=2020-03&amp;rft_id=info%3Ahdl%2F2318%2F1699364&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A59606353%23id-name%3DS2CID&amp;rft.issn=0951-5666&amp;rft_id=info%3Adoi%2F10.1007%2Fs00146-019-00879-x&amp;rft.aulast=Umbrello&amp;rft.aufirst=Steven&amp;rft.au=Torres%2C+Phil&amp;rft.au=De+Bellis%2C+Angelo+F.&amp;rft_id=http%3A%2F%2Flink.springer.com%2F10.1007%2Fs00146-019-00879-x&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-121"><span class="mw-cite-backlink"><b><a href="#cite_ref-121">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJamison2024" class="citation web cs1">Jamison M (2024-12-20). <a rel="nofollow" class="external text" href="https://executivegov.com/2024/12/darpa-launches-ethics-program-autonomous-systems/">"DARPA Launches Ethics Program for Autonomous Systems"</a>. <i>executivegov.com</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2025-01-02</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=executivegov.com&amp;rft.atitle=DARPA+Launches+Ethics+Program+for+Autonomous+Systems&amp;rft.date=2024-12-20&amp;rft.aulast=Jamison&amp;rft.aufirst=Miles&amp;rft_id=https%3A%2F%2Fexecutivegov.com%2F2024%2F12%2Fdarpa-launches-ethics-program-autonomous-systems%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-122"><span class="mw-cite-backlink"><b><a href="#cite_ref-122">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.spacedaily.com/reports/CoVar_to_develop_Ethical_Standards_for_Autonomous_Systems_under_DARPA_ASIMOV_contract_999.html">"DARPA's ASIMOV seeks to develop Ethical Standards for Autonomous Systems"</a>. <i>Space Daily</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2025-01-02</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Space+Daily&amp;rft.atitle=DARPA%27s+ASIMOV+seeks+to+develop+Ethical+Standards+for+Autonomous+Systems&amp;rft_id=https%3A%2F%2Fwww.spacedaily.com%2Freports%2FCoVar_to_develop_Ethical_Standards_for_Autonomous_Systems_under_DARPA_ASIMOV_contract_999.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-123"><span class="mw-cite-backlink"><b><a href="#cite_ref-123">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHellström2013" class="citation journal cs1">Hellström T (June 2013). "On the moral responsibility of military robots". <i>Ethics and Information Technology</i>. <b>15</b> (2): <span class="nowrap">99–</span>107. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10676-012-9301-2">10.1007/s10676-012-9301-2</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:15205810">15205810</a>. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ProQuest" title="ProQuest">ProQuest</a>&#160;<a rel="nofollow" class="external text" href="https://www.proquest.com/docview/1372020233">1372020233</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Ethics+and+Information+Technology&amp;rft.atitle=On+the+moral+responsibility+of+military+robots&amp;rft.volume=15&amp;rft.issue=2&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E99-%3C%2Fspan%3E107&amp;rft.date=2013-06&amp;rft_id=info%3Adoi%2F10.1007%2Fs10676-012-9301-2&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A15205810%23id-name%3DS2CID&amp;rft.aulast=Hellstr%C3%B6m&amp;rft.aufirst=Thomas&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-124"><span class="mw-cite-backlink"><b><a href="#cite_ref-124">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMitra2018" class="citation web cs1">Mitra A (5 April 2018). <a rel="nofollow" class="external text" href="https://qz.com/1244055/we-can-train-ai-to-identify-good-and-evil-and-then-use-it-to-teach-us-morality/">"We can train AI to identify good and evil, and then use it to teach us morality"</a>. <i>Quartz</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190726085248/https://qz.com/1244055/we-can-train-ai-to-identify-good-and-evil-and-then-use-it-to-teach-us-morality/">Archived</a> from the original on 2019-07-26<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Quartz&amp;rft.atitle=We+can+train+AI+to+identify+good+and+evil%2C+and+then+use+it+to+teach+us+morality&amp;rft.date=2018-04-05&amp;rft.aulast=Mitra&amp;rft.aufirst=Ambarish&amp;rft_id=https%3A%2F%2Fqz.com%2F1244055%2Fwe-can-train-ai-to-identify-good-and-evil-and-then-use-it-to-teach-us-morality%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-125"><span class="mw-cite-backlink"><b><a href="#cite_ref-125">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDominguez2022" class="citation news cs1">Dominguez G (23 August 2022). <a rel="nofollow" class="external text" href="https://www.japantimes.co.jp/news/2022/08/23/asia-pacific/south-korea-stealth-drones-development/">"South Korea developing new stealthy drones to support combat aircraft"</a>. <i><a href="/wiki/The_Japan_Times" title="The Japan Times">The Japan Times</a></i><span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Japan+Times&amp;rft.atitle=South+Korea+developing+new+stealthy+drones+to+support+combat+aircraft&amp;rft.date=2022-08-23&amp;rft.aulast=Dominguez&amp;rft.aufirst=Gabriel&amp;rft_id=https%3A%2F%2Fwww.japantimes.co.jp%2Fnews%2F2022%2F08%2F23%2Fasia-pacific%2Fsouth-korea-stealth-drones-development%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-126"><span class="mw-cite-backlink"><b><a href="#cite_ref-126">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://futureoflife.org/ai-principles/">"AI Principles"</a>. <i>Future of Life Institute</i>. 11 August 2017. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171211171044/https://futureoflife.org/ai-principles/">Archived</a> from the original on 2017-12-11<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Future+of+Life+Institute&amp;rft.atitle=AI+Principles&amp;rft.date=2017-08-11&amp;rft_id=https%3A%2F%2Ffutureoflife.org%2Fai-principles%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-theatlantic.com-127"><span class="mw-cite-backlink">^ <a href="#cite_ref-theatlantic.com_127-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-theatlantic.com_127-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZach_Musgrave_and_Bryan_W._Roberts2015" class="citation web cs1">Zach Musgrave and Bryan W. Roberts (2015-08-14). 
<a rel="nofollow" class="external text" href="https://www.theatlantic.com/technology/archive/2015/08/humans-not-robots-are-the-real-reason-artificial-intelligence-is-scary/400994/">"Why Artificial Intelligence Can Too Easily Be Weaponized – The Atlantic"</a>. <i>The Atlantic</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170411140722/https://www.theatlantic.com/technology/archive/2015/08/humans-not-robots-are-the-real-reason-artificial-intelligence-is-scary/400994/">Archived</a> from the original on 2017-04-11<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-03-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+Atlantic&amp;rft.atitle=Why+Artificial+Intelligence+Can+Too+Easily+Be+Weaponized+%E2%80%93+The+Atlantic&amp;rft.date=2015-08-14&amp;rft.au=Zach+Musgrave+and+Bryan+W.+Roberts&amp;rft_id=https%3A%2F%2Fwww.theatlantic.com%2Ftechnology%2Farchive%2F2015%2F08%2Fhumans-not-robots-are-the-real-reason-artificial-intelligence-is-scary%2F400994%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-128"><span class="mw-cite-backlink"><b><a href="#cite_ref-128">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCat_Zakrzewski2015" class="citation web cs1">Cat Zakrzewski (2015-07-27). <a rel="nofollow" class="external text" href="https://blogs.wsj.com/digits/2015/07/27/musk-hawking-warn-of-artificial-intelligence-weapons/">"Musk, Hawking Warn of Artificial Intelligence Weapons"</a>. <i>WSJ</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150728173944/http://blogs.wsj.com/digits/2015/07/27/musk-hawking-warn-of-artificial-intelligence-weapons/">Archived</a> from the original on 2015-07-28<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-08-04</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=WSJ&amp;rft.atitle=Musk%2C+Hawking+Warn+of+Artificial+Intelligence+Weapons&amp;rft.date=2015-07-27&amp;rft.au=Cat+Zakrzewski&amp;rft_id=https%3A%2F%2Fblogs.wsj.com%2Fdigits%2F2015%2F07%2F27%2Fmusk-hawking-warn-of-artificial-intelligence-weapons%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-129"><span class="mw-cite-backlink"><b><a href="#cite_ref-129">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.openphilanthropy.org/research/potential-risks-from-advanced-artificial-intelligence/">"Potential Risks from Advanced Artificial Intelligence"</a>. <i>Open Philanthropy</i>. August 11, 2015<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-04-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Open+Philanthropy&amp;rft.atitle=Potential+Risks+from+Advanced+Artificial+Intelligence&amp;rft.date=2015-08-11&amp;rft_id=https%3A%2F%2Fwww.openphilanthropy.org%2Fresearch%2Fpotential-risks-from-advanced-artificial-intelligence%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:023-130"><span class="mw-cite-backlink">^ <a href="#cite_ref-:023_130-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:023_130-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBachulskaLeonardOertel2024" class="citation book cs1">Bachulska A, Leonard M, Oertel J (2 July 2024). <a rel="nofollow" class="external text" href="https://ecfr.eu/publication/idea-of-china/"><i>The Idea of China: Chinese Thinkers on Power, Progress, and People</i></a> <span class="cs1-format">(EPUB)</span>. Berlin, Germany: <a href="/wiki/European_Council_on_Foreign_Relations" title="European Council on Foreign Relations">European Council on Foreign Relations</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-1-916682-42-9" title="Special:BookSources/978-1-916682-42-9"><bdi>978-1-916682-42-9</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240717120845/https://ecfr.eu/publication/idea-of-china/">Archived</a> from the original on 17 July 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">22 July</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+Idea+of+China%3A+Chinese+Thinkers+on+Power%2C+Progress%2C+and+People&amp;rft.place=Berlin%2C+Germany&amp;rft.pub=European+Council+on+Foreign+Relations&amp;rft.date=2024-07-02&amp;rft.isbn=978-1-916682-42-9&amp;rft.aulast=Bachulska&amp;rft.aufirst=Alicja&amp;rft.au=Leonard%2C+Mark&amp;rft.au=Oertel%2C+Janka&amp;rft_id=https%3A%2F%2Fecfr.eu%2Fpublication%2Fidea-of-china%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-reg-131"><span class="mw-cite-backlink"><b><a href="#cite_ref-reg_131-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBrandon_Vigliarolo" class="citation web cs1">Brandon Vigliarolo. <a rel="nofollow" class="external text" href="https://www.theregister.com/2023/02/17/military_ai_summit/">"International military AI summit ends with 60-state pledge"</a>. <i>www.theregister.com</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-02-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.theregister.com&amp;rft.atitle=International+military+AI+summit+ends+with+60-state+pledge&amp;rft.au=Brandon+Vigliarolo&amp;rft_id=https%3A%2F%2Fwww.theregister.com%2F2023%2F02%2F17%2Fmilitary_ai_summit%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-NYT-2009-132"><span class="mw-cite-backlink">^ <a href="#cite_ref-NYT-2009_132-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-NYT-2009_132-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMarkoff2009" class="citation news cs1">Markoff J (25 July 2009). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2009/07/26/science/26robot.html">"Scientists Worry Machines May Outsmart Man"</a>. <i>The New York Times</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170225202201/http://www.nytimes.com/2009/07/26/science/26robot.html">Archived</a> from the original on 25 February 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">24 February</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+New+York+Times&amp;rft.atitle=Scientists+Worry+Machines+May+Outsmart+Man&amp;rft.date=2009-07-25&amp;rft.aulast=Markoff&amp;rft.aufirst=John&amp;rft_id=https%3A%2F%2Fwww.nytimes.com%2F2009%2F07%2F26%2Fscience%2F26robot.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Muehlhauser,_Luke_2012-133"><span class="mw-cite-backlink"><b><a href="#cite_ref-Muehlhauser,_Luke_2012_133-0">^</a></b></span> <span class="reference-text">Muehlhauser, Luke, and Louie Helm. 2012. <a rel="nofollow" class="external text" href="https://intelligence.org/files/IE-ME.pdf">"Intelligence Explosion and Machine Ethics"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150507173028/http://intelligence.org/files/IE-ME.pdf">Archived</a> 2015-05-07 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>. In Singularity Hypotheses: A Scientific and Philosophical Assessment, edited by Amnon Eden, Johnny Søraker, James H. Moor, and Eric Steinhart. Berlin: Springer.</span> </li> <li id="cite_note-Bostrom,_Nick_2003-134"><span class="mw-cite-backlink"><b><a href="#cite_ref-Bostrom,_Nick_2003_134-0">^</a></b></span> <span class="reference-text">Bostrom, Nick. 2003. <a rel="nofollow" class="external text" href="http://www.nickbostrom.com/ethics/ai.html">"Ethical Issues in Advanced Artificial Intelligence"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20181008090224/http://www.nickbostrom.com/ethics/ai.html">Archived</a> 2018-10-08 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>. In Cognitive, Emotive and Ethical Aspects of Decision Making in Humans and in Artificial Intelligence, edited by Iva Smit and George E. Lasker, 12–17. Vol. 2. 
Windsor, ON: International Institute for Advanced Studies in Systems Research / Cybernetics.</span> </li> <li id="cite_note-135"><span class="mw-cite-backlink"><b><a href="#cite_ref-135">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBostrom2017" class="citation book cs1">Bostrom N (2017). <i>Superintelligence: paths, dangers, strategies</i>. Oxford, United Kingdom: Oxford University Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-19-967811-2" title="Special:BookSources/978-0-19-967811-2"><bdi>978-0-19-967811-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Superintelligence%3A+paths%2C+dangers%2C+strategies&amp;rft.place=Oxford%2C+United+Kingdom&amp;rft.pub=Oxford+University+Press&amp;rft.date=2017&amp;rft.isbn=978-0-19-967811-2&amp;rft.aulast=Bostrom&amp;rft.aufirst=Nick&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-136"><span class="mw-cite-backlink"><b><a href="#cite_ref-136">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFUmbrelloBaum2018" class="citation journal cs1">Umbrello S, Baum SD (2018-06-01). <a rel="nofollow" class="external text" href="http://www.sciencedirect.com/science/article/pii/S0016328717301908">"Evaluating future nanotechnology: The net societal impacts of atomically precise manufacturing"</a>. <i>Futures</i>. <b>100</b>: <span class="nowrap">63–</span>73. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.futures.2018.04.007">10.1016/j.futures.2018.04.007</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/2318%2F1685533">2318/1685533</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0016-3287">0016-3287</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:158503813">158503813</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190509222110/https://www.sciencedirect.com/science/article/pii/S0016328717301908">Archived</a> from the original on 2019-05-09<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2020-11-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Futures&amp;rft.atitle=Evaluating+future+nanotechnology%3A+The+net+societal+impacts+of+atomically+precise+manufacturing&amp;rft.volume=100&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E63-%3C%2Fspan%3E73&amp;rft.date=2018-06-01&amp;rft_id=info%3Ahdl%2F2318%2F1685533&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A158503813%23id-name%3DS2CID&amp;rft.issn=0016-3287&amp;rft_id=info%3Adoi%2F10.1016%2Fj.futures.2018.04.007&amp;rft.aulast=Umbrello&amp;rft.aufirst=Steven&amp;rft.au=Baum%2C+Seth+D.&amp;rft_id=http%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS0016328717301908&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-137"><span class="mw-cite-backlink"><b><a href="#cite_ref-137">^</a></b></span> <span class="reference-text">Yudkowsky, Eliezer. 2011. <a rel="nofollow" class="external text" href="https://intelligence.org/files/ComplexValues.pdf">"Complex Value Systems in Friendly AI"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150929212318/http://intelligence.org/files/ComplexValues.pdf">Archived</a> 2015-09-29 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>. In Schmidhuber, Thórisson, and Looks 2011, 388–393.</span> </li> <li id="cite_note-138"><span class="mw-cite-backlink"><b><a href="#cite_ref-138">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussell2019" class="citation book cs1"><a href="/wiki/Stuart_J._Russell" title="Stuart J. Russell">Russell S</a> (October 8, 2019). <a href="/wiki/Human_Compatible" title="Human Compatible"><i>Human Compatible: Artificial Intelligence and the Problem of Control</i></a>. United States: Viking. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-525-55861-3" title="Special:BookSources/978-0-525-55861-3"><bdi>978-0-525-55861-3</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/1083694322">1083694322</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Human+Compatible%3A+Artificial+Intelligence+and+the+Problem+of+Control&amp;rft.place=United+States&amp;rft.pub=Viking&amp;rft.date=2019-10-08&amp;rft_id=info%3Aoclcnum%2F1083694322&amp;rft.isbn=978-0-525-55861-3&amp;rft.aulast=Russell&amp;rft.aufirst=Stuart&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-139"><span class="mw-cite-backlink"><b><a href="#cite_ref-139">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYampolskiy2020" class="citation journal cs1">Yampolskiy RV (2020-03-01). <a rel="nofollow" class="external text" href="https://www.worldscientific.com/doi/abs/10.1142/S2705078520500034">"Unpredictability of AI: On the Impossibility of Accurately Predicting All Actions of a Smarter Agent"</a>. <i>Journal of Artificial Intelligence and Consciousness</i>. 
<b>07</b> (1): <span class="nowrap">109–</span>118. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1142%2FS2705078520500034">10.1142/S2705078520500034</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2705-0785">2705-0785</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:218916769">218916769</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210318060657/https://www.worldscientific.com/doi/abs/10.1142/S2705078520500034">Archived</a> from the original on 2021-03-18<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-11-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Artificial+Intelligence+and+Consciousness&amp;rft.atitle=Unpredictability+of+AI%3A+On+the+Impossibility+of+Accurately+Predicting+All+Actions+of+a+Smarter+Agent&amp;rft.volume=07&amp;rft.issue=1&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E109-%3C%2Fspan%3E118&amp;rft.date=2020-03-01&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A218916769%23id-name%3DS2CID&amp;rft.issn=2705-0785&amp;rft_id=info%3Adoi%2F10.1142%2FS2705078520500034&amp;rft.aulast=Yampolskiy&amp;rft.aufirst=Roman+V.&amp;rft_id=https%3A%2F%2Fwww.worldscientific.com%2Fdoi%2Fabs%2F10.1142%2FS2705078520500034&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-140"><span class="mw-cite-backlink"><b><a href="#cite_ref-140">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWallachVallor2020" class="citation cs2">Wallach W, Vallor S (2020-09-17), <a rel="nofollow" class="external text" href="https://oxford.universitypressscholarship.com/view/10.1093/oso/9780190905033.001.0001/oso-9780190905033-chapter-14">"Moral Machines: From Value Alignment to Embodied Virtue"</a>, <i>Ethics of Artificial Intelligence</i>, Oxford University Press, pp.&#160;<span class="nowrap">383–</span>412, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1093%2Foso%2F9780190905033.003.0014">10.1093/oso/9780190905033.003.0014</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-19-090503-3" title="Special:BookSources/978-0-19-090503-3"><bdi>978-0-19-090503-3</bdi></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201208114354/https://oxford.universitypressscholarship.com/view/10.1093/oso/9780190905033.001.0001/oso-9780190905033-chapter-14">archived</a> from the original on 2020-12-08<span class="reference-accessdate">, retrieved <span class="nowrap">2020-11-29</span></span></cite><span 
title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Ethics+of+Artificial+Intelligence&amp;rft.atitle=Moral+Machines%3A+From+Value+Alignment+to+Embodied+Virtue&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E383-%3C%2Fspan%3E412&amp;rft.date=2020-09-17&amp;rft_id=info%3Adoi%2F10.1093%2Foso%2F9780190905033.003.0014&amp;rft.isbn=978-0-19-090503-3&amp;rft.aulast=Wallach&amp;rft.aufirst=Wendell&amp;rft.au=Vallor%2C+Shannon&amp;rft_id=https%3A%2F%2Foxford.universitypressscholarship.com%2Fview%2F10.1093%2Foso%2F9780190905033.001.0001%2Foso-9780190905033-chapter-14&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-141"><span class="mw-cite-backlink"><b><a href="#cite_ref-141">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFUmbrello2019" class="citation journal cs1">Umbrello S (2019). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Fbdcc3010005">"Beneficial Artificial Intelligence Coordination by Means of a Value Sensitive Design Approach"</a>. <i>Big Data and Cognitive Computing</i>. <b>3</b> (1): 5. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Fbdcc3010005">10.3390/bdcc3010005</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/2318%2F1685727">2318/1685727</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Big+Data+and+Cognitive+Computing&amp;rft.atitle=Beneficial+Artificial+Intelligence+Coordination+by+Means+of+a+Value+Sensitive+Design+Approach&amp;rft.volume=3&amp;rft.issue=1&amp;rft.pages=5&amp;rft.date=2019&amp;rft_id=info%3Ahdl%2F2318%2F1685727&amp;rft_id=info%3Adoi%2F10.3390%2Fbdcc3010005&amp;rft.aulast=Umbrello&amp;rft.aufirst=Steven&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Fbdcc3010005&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-142"><span class="mw-cite-backlink"><b><a href="#cite_ref-142">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFloridiCowlsKingTaddeo2020" class="citation journal cs1">Floridi L, Cowls J, King TC, Taddeo M (2020). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7286860">"How to Design AI for Social Good: Seven Essential Factors"</a>. <i>Science and Engineering Ethics</i>. <b>26</b> (3): <span class="nowrap">1771–</span>1796. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs11948-020-00213-5">10.1007/s11948-020-00213-5</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1353-3452">1353-3452</a>. 
<a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7286860">7286860</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32246245">32246245</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Science+and+Engineering+Ethics&amp;rft.atitle=How+to+Design+AI+for+Social+Good%3A+Seven+Essential+Factors&amp;rft.volume=26&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E1771-%3C%2Fspan%3E1796&amp;rft.date=2020&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7286860%23id-name%3DPMC&amp;rft.issn=1353-3452&amp;rft_id=info%3Apmid%2F32246245&amp;rft_id=info%3Adoi%2F10.1007%2Fs11948-020-00213-5&amp;rft.aulast=Floridi&amp;rft.aufirst=Luciano&amp;rft.au=Cowls%2C+Josh&amp;rft.au=King%2C+Thomas+C.&amp;rft.au=Taddeo%2C+Mariarosaria&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7286860&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-143"><span class="mw-cite-backlink"><b><a href="#cite_ref-143">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://github.com/NVIDIA/NeMo-Guardrails">"NeMo Guardrails"</a>. <i>NeMo Guardrails</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2024-12-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=NeMo+Guardrails&amp;rft.atitle=NeMo+Guardrails&amp;rft_id=https%3A%2F%2Fgithub.com%2FNVIDIA%2FNeMo-Guardrails&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-144"><span class="mw-cite-backlink"><b><a href="#cite_ref-144">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/">"Llama Guard: LLM-based Input-Output Safeguard for Human-AI Conversations"</a>. <i>Meta.com</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-12-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Meta.com&amp;rft.atitle=Llama+Guard%3A+LLM-based+Input-Output+Safeguard+for+Human-AI+Conversations&amp;rft_id=https%3A%2F%2Fai.meta.com%2Fresearch%2Fpublications%2Fllama-guard-llm-based-input-output-safeguard-for-human-ai-conversations%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-:0-145"><span class="mw-cite-backlink">^ <a href="#cite_ref-:0_145-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:0_145-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFŠekrstMcHughCefalu2024" class="citation arxiv cs1">Šekrst K, McHugh J, Cefalu JR (2024). "AI Ethics by Design: Implementing Customizable Guardrails for Responsible AI Development". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2411.14442">2411.14442</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CY">cs.CY</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=AI+Ethics+by+Design%3A+Implementing+Customizable+Guardrails+for+Responsible+AI+Development&amp;rft.date=2024&amp;rft_id=info%3Aarxiv%2F2411.14442&amp;rft.aulast=%C5%A0ekrst&amp;rft.aufirst=Kristina&amp;rft.au=McHugh%2C+Jeremy&amp;rft.au=Cefalu%2C+Jonathan+Rodriguez&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-146"><span class="mw-cite-backlink"><b><a href="#cite_ref-146">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://docs.nvidia.com/nemo-guardrails/index.html">"NVIDIA NeMo Guardrails"</a>. <i>NVIDIA NeMo Guardrails</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2024-12-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=NVIDIA+NeMo+Guardrails&amp;rft.atitle=NVIDIA+NeMo+Guardrails&amp;rft_id=https%3A%2F%2Fdocs.nvidia.com%2Fnemo-guardrails%2Findex.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-147"><span class="mw-cite-backlink"><b><a href="#cite_ref-147">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFInanUpasaniChiRungta2023" class="citation arxiv cs1">Inan H, Upasani K, Chi J, Rungta R, Iyer K, Mao Y, Tontchev M, Hu Q, Fuller B, Testuggine D, Khabsa M (2023). "Llama Guard: LLM-based Input-Output Safeguard for Human-AI Conversations". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2312.06674">2312.06674</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Llama+Guard%3A+LLM-based+Input-Output+Safeguard+for+Human-AI+Conversations&amp;rft.date=2023&amp;rft_id=info%3Aarxiv%2F2312.06674&amp;rft.aulast=Inan&amp;rft.aufirst=Hakan&amp;rft.au=Upasani%2C+Kartikeya&amp;rft.au=Chi%2C+Jianfeng&amp;rft.au=Rungta%2C+Rashi&amp;rft.au=Iyer%2C+Krithika&amp;rft.au=Mao%2C+Yuning&amp;rft.au=Tontchev%2C+Michael&amp;rft.au=Hu%2C+Qing&amp;rft.au=Fuller%2C+Brian&amp;rft.au=Testuggine%2C+Davide&amp;rft.au=Khabsa%2C+Madian&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-148"><span class="mw-cite-backlink"><b><a href="#cite_ref-148">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDongMuJinQi2024" class="citation arxiv cs1">Dong Y, Mu R, Jin G, Qi Y, Hu J, Zhao X, Meng J, Ruan W, Huang X (2024). "Building Guardrails for Large Language Models". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2402.01822">2402.01822</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs">cs</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Building+Guardrails+for+Large+Language+Models&amp;rft.date=2024&amp;rft_id=info%3Aarxiv%2F2402.01822&amp;rft.aulast=Dong&amp;rft.aufirst=Yi&amp;rft.au=Mu%2C+Ronghui&amp;rft.au=Jin%2C+Gaojie&amp;rft.au=Qi%2C+Yi&amp;rft.au=Hu%2C+Jinwei&amp;rft.au=Zhao%2C+Xingyu&amp;rft.au=Meng%2C+Jie&amp;rft.au=Ruan%2C+Wenjie&amp;rft.au=Huang%2C+Xiaowei&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-149"><span class="mw-cite-backlink"><b><a href="#cite_ref-149">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFiegerman2016" class="citation news cs1">Fiegerman S (28 September 2016). <a rel="nofollow" class="external text" href="https://money.cnn.com/2016/09/28/technology/partnership-on-ai/">"Facebook, Google, Amazon create group to ease AI concerns"</a>. <i>CNNMoney</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200917141730/https://money.cnn.com/2016/09/28/technology/partnership-on-ai/">Archived</a> from the original on 17 September 2020<span class="reference-accessdate">. 
Retrieved <span class="nowrap">18 August</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=CNNMoney&amp;rft.atitle=Facebook%2C+Google%2C+Amazon+create+group+to+ease+AI+concerns&amp;rft.date=2016-09-28&amp;rft.aulast=Fiegerman&amp;rft.aufirst=Seth&amp;rft_id=https%3A%2F%2Fmoney.cnn.com%2F2016%2F09%2F28%2Ftechnology%2Fpartnership-on-ai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-150"><span class="mw-cite-backlink"><b><a href="#cite_ref-150">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSlotaFleischmannGreenbergVerma2023" class="citation journal cs1">Slota SC, Fleischmann KR, Greenberg S, Verma N, Cummings B, Li L, Shenefiel C (2023). <a rel="nofollow" class="external text" href="https://onlinelibrary.wiley.com/doi/10.1002/asi.24638">"Locating the work of artificial intelligence ethics"</a>. <i>Journal of the Association for Information Science and Technology</i>. <b>74</b> (3): <span class="nowrap">311–</span>322. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1002%2Fasi.24638">10.1002/asi.24638</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2330-1635">2330-1635</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:247342066">247342066</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925020205/https://onlinelibrary.wiley.com/doi/10.1002/asi.24638">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-07-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+the+Association+for+Information+Science+and+Technology&amp;rft.atitle=Locating+the+work+of+artificial+intelligence+ethics&amp;rft.volume=74&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E311-%3C%2Fspan%3E322&amp;rft.date=2023&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A247342066%23id-name%3DS2CID&amp;rft.issn=2330-1635&amp;rft_id=info%3Adoi%2F10.1002%2Fasi.24638&amp;rft.aulast=Slota&amp;rft.aufirst=Stephen+C.&amp;rft.au=Fleischmann%2C+Kenneth+R.&amp;rft.au=Greenberg%2C+Sherri&amp;rft.au=Verma%2C+Nitin&amp;rft.au=Cummings%2C+Brenna&amp;rft.au=Li%2C+Lan&amp;rft.au=Shenefiel%2C+Chris&amp;rft_id=https%3A%2F%2Fonlinelibrary.wiley.com%2Fdoi%2F10.1002%2Fasi.24638&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-151"><span class="mw-cite-backlink"><b><a href="#cite_ref-151">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://ec.europa.eu/digital-single-market/en/news/ethics-guidelines-trustworthy-ai">"Ethics guidelines for trustworthy AI"</a>. <i>Shaping Europe’s digital future – European Commission</i>. European Commission. 
2019-04-08. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200220002342/https://ec.europa.eu/digital-single-market/en/news/ethics-guidelines-trustworthy-ai">Archived</a> from the original on 2020-02-20<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-02-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Shaping+Europe%E2%80%99s+digital+future+%E2%80%93+European+Commission&amp;rft.atitle=Ethics+guidelines+for+trustworthy+AI&amp;rft.date=2019-04-08&amp;rft_id=https%3A%2F%2Fec.europa.eu%2Fdigital-single-market%2Fen%2Fnews%2Fethics-guidelines-trustworthy-ai&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-152"><span class="mw-cite-backlink"><b><a href="#cite_ref-152">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://ec.europa.eu/digital-single-market/en/news/white-paper-artificial-intelligence-european-approach-excellence-and-trust">"White Paper on Artificial Intelligence – a European approach to excellence and trust &#124; Shaping Europe's digital future"</a>. 19 February 2020. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210306003222/https://ec.europa.eu/digital-single-market/en/news/white-paper-artificial-intelligence-european-approach-excellence-and-trust">Archived</a> from the original on 2021-03-06<span class="reference-accessdate">. Retrieved <span class="nowrap">2021-03-18</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=White+Paper+on+Artificial+Intelligence+%E2%80%93+a+European+approach+to+excellence+and+trust+%26%23124%3B+Shaping+Europe%27s+digital+future&amp;rft.date=2020-02-19&amp;rft_id=https%3A%2F%2Fec.europa.eu%2Fdigital-single-market%2Fen%2Fnews%2Fwhite-paper-artificial-intelligence-european-approach-excellence-and-trust&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-153"><span class="mw-cite-backlink"><b><a href="#cite_ref-153">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.oecd.ai/">"OECD AI Policy Observatory"</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210308171133/https://oecd.ai/">Archived</a> from the original on 2021-03-08<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2021-03-18</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=OECD+AI+Policy+Observatory&amp;rft_id=https%3A%2F%2Fwww.oecd.ai%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-154"><span class="mw-cite-backlink"><b><a href="#cite_ref-154">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><a rel="nofollow" class="external text" href="https://unesdoc.unesco.org/ark:/48223/pf0000381137.locale=en"><i>Recommendation on the Ethics of Artificial Intelligence</i></a>. UNESCO. 2021.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Recommendation+on+the+Ethics+of+Artificial+Intelligence&amp;rft.pub=UNESCO&amp;rft.date=2021&amp;rft_id=https%3A%2F%2Funesdoc.unesco.org%2Fark%3A%2F48223%2Fpf0000381137.locale%3Den&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-155"><span class="mw-cite-backlink"><b><a href="#cite_ref-155">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.helsinkitimes.fi/themes/themes/science-and-technology/20454-unesco-member-states-adopt-first-global-agreement-on-ai-ethics.html">"UNESCO member states adopt first global agreement on AI ethics"</a>. <i>Helsinki Times</i>. 2021-11-26. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925020210/https://www.helsinkitimes.fi/themes/themes/science-and-technology/20454-unesco-member-states-adopt-first-global-agreement-on-ai-ethics.html">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-04-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Helsinki+Times&amp;rft.atitle=UNESCO+member+states+adopt+first+global+agreement+on+AI+ethics&amp;rft.date=2021-11-26&amp;rft_id=https%3A%2F%2Fwww.helsinkitimes.fi%2Fthemes%2Fthemes%2Fscience-and-technology%2F20454-unesco-member-states-adopt-first-global-agreement-on-ai-ethics.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-156"><span class="mw-cite-backlink"><b><a href="#cite_ref-156">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://hbr.org/2016/12/the-obama-administrations-roadmap-for-ai-policy">"The Obama Administration's Roadmap for AI Policy"</a>. <i>Harvard Business Review</i>. 2016-12-21. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0017-8012">0017-8012</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210122003445/https://hbr.org/2016/12/the-obama-administrations-roadmap-for-ai-policy">Archived</a> from the original on 2021-01-22<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2021-03-16</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Harvard+Business+Review&amp;rft.atitle=The+Obama+Administration%27s+Roadmap+for+AI+Policy&amp;rft.date=2016-12-21&amp;rft.issn=0017-8012&amp;rft_id=https%3A%2F%2Fhbr.org%2F2016%2F12%2Fthe-obama-administrations-roadmap-for-ai-policy&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-157"><span class="mw-cite-backlink"><b><a href="#cite_ref-157">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://trumpwhitehouse.archives.gov/articles/accelerating-americas-leadership-in-artificial-intelligence/">"Accelerating America's Leadership in Artificial Intelligence – The White House"</a>. <i>trumpwhitehouse.archives.gov</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210225073748/https://trumpwhitehouse.archives.gov/articles/accelerating-americas-leadership-in-artificial-intelligence/">Archived</a> from the original on 2021-02-25<span class="reference-accessdate">. Retrieved <span class="nowrap">2021-03-16</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=trumpwhitehouse.archives.gov&amp;rft.atitle=Accelerating+America%27s+Leadership+in+Artificial+Intelligence+%E2%80%93+The+White+House&amp;rft_id=https%3A%2F%2Ftrumpwhitehouse.archives.gov%2Farticles%2Faccelerating-americas-leadership-in-artificial-intelligence%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-158"><span class="mw-cite-backlink"><b><a href="#cite_ref-158">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.federalregister.gov/documents/2020/01/13/2020-00261/request-for-comments-on-a-draft-memorandum-to-the-heads-of-executive-departments-and-agencies">"Request for Comments on a Draft Memorandum to the Heads of Executive Departments and Agencies, "Guidance for Regulation of Artificial Intelligence Applications"<span class="cs1-kern-right"></span>"</a>. <i>Federal Register</i>. 2020-01-13. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201125060218/https://www.federalregister.gov/documents/2020/01/13/2020-00261/request-for-comments-on-a-draft-memorandum-to-the-heads-of-executive-departments-and-agencies">Archived</a> from the original on 2020-11-25<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2020-11-28</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Federal+Register&amp;rft.atitle=Request+for+Comments+on+a+Draft+Memorandum+to+the+Heads+of+Executive+Departments+and+Agencies%2C+%22Guidance+for+Regulation+of+Artificial+Intelligence+Applications%22&amp;rft.date=2020-01-13&amp;rft_id=https%3A%2F%2Fwww.federalregister.gov%2Fdocuments%2F2020%2F01%2F13%2F2020-00261%2Frequest-for-comments-on-a-draft-memorandum-to-the-heads-of-executive-departments-and-agencies&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-159"><span class="mw-cite-backlink"><b><a href="#cite_ref-159">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.hpcwire.com/2019/05/14/ccc-offers-draft-20-year-ai-roadmap-seeks-comments/">"CCC Offers Draft 20-Year AI Roadmap; Seeks Comments"</a>. <i>HPCwire</i>. 2019-05-14. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210318060659/https://www.hpcwire.com/2019/05/14/ccc-offers-draft-20-year-ai-roadmap-seeks-comments/">Archived</a> from the original on 2021-03-18<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-07-22</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=HPCwire&amp;rft.atitle=CCC+Offers+Draft+20-Year+AI+Roadmap%3B+Seeks+Comments&amp;rft.date=2019-05-14&amp;rft_id=https%3A%2F%2Fwww.hpcwire.com%2F2019%2F05%2F14%2Fccc-offers-draft-20-year-ai-roadmap-seeks-comments%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-160"><span class="mw-cite-backlink"><b><a href="#cite_ref-160">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.cccblog.org/2019/05/13/request-comments-on-draft-a-20-year-community-roadmap-for-ai-research-in-the-us/">"Request Comments on Draft: A 20-Year Community Roadmap for AI Research in the US&#160;» CCC Blog"</a>. 13 May 2019. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190514193546/https://www.cccblog.org/2019/05/13/request-comments-on-draft-a-20-year-community-roadmap-for-ai-research-in-the-us/">Archived</a> from the original on 2019-05-14<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-07-22</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Request+Comments+on+Draft%3A+A+20-Year+Community+Roadmap+for+AI+Research+in+the+US+%C2%BB+CCC+Blog&amp;rft.date=2019-05-13&amp;rft_id=https%3A%2F%2Fwww.cccblog.org%2F2019%2F05%2F13%2Frequest-comments-on-draft-a-20-year-community-roadmap-for-ai-research-in-the-us%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-161"><span class="mw-cite-backlink"><b><a href="#cite_ref-161">^</a></b></span> <span class="reference-text"><span class="languageicon">(in Russian)</span> <a rel="nofollow" class="external text" href="https://www.kommersant.ru/doc/5089365">Интеллектуальные правила</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211230212952/https://www.kommersant.ru/doc/5089365">Archived</a> 2021-12-30 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> — <a href="/wiki/Kommersant" title="Kommersant">Kommersant</a>, 25.11.2021</span> </li> <li id="cite_note-162"><span class="mw-cite-backlink"><b><a href="#cite_ref-162">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGraceSalvatierDafoeZhang2018" class="citation arxiv cs1">Grace K, Salvatier J, Dafoe A, Zhang B, Evans O (2018-05-03). "When Will AI Exceed Human Performance? Evidence from AI Experts". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1705.08807">1705.08807</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.AI">cs.AI</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=When+Will+AI+Exceed+Human+Performance%3F+Evidence+from+AI+Experts&amp;rft.date=2018-05-03&amp;rft_id=info%3Aarxiv%2F1705.08807&amp;rft.aulast=Grace&amp;rft.aufirst=Katja&amp;rft.au=Salvatier%2C+John&amp;rft.au=Dafoe%2C+Allan&amp;rft.au=Zhang%2C+Baobao&amp;rft.au=Evans%2C+Owain&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-163"><span class="mw-cite-backlink"><b><a href="#cite_ref-163">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.technologyreview.com/2018/03/16/144630/china-wants-to-shape-the-global-future-of-artificial-intelligence/">"China wants to shape the global future of artificial intelligence"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201120052853/https://www.technologyreview.com/2018/03/16/144630/china-wants-to-shape-the-global-future-of-artificial-intelligence/">Archived</a> from the original on 2020-11-20<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2020-11-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=MIT+Technology+Review&amp;rft.atitle=China+wants+to+shape+the+global+future+of+artificial+intelligence&amp;rft_id=https%3A%2F%2Fwww.technologyreview.com%2F2018%2F03%2F16%2F144630%2Fchina-wants-to-shape-the-global-future-of-artificial-intelligence%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-164"><span class="mw-cite-backlink"><b><a href="#cite_ref-164">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFloridiCowlsBeltramettiChatila2018" class="citation journal cs1">Floridi L, Cowls J, Beltrametti M, Chatila R, Chazerand P, Dignum V, Luetge C, Madelin R, Pagallo U, Rossi F, Schafer B (2018-12-01). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6404626">"AI4People—An Ethical Framework for a Good AI Society: Opportunities, Risks, Principles, and Recommendations"</a>. <i>Minds and Machines</i>. <b>28</b> (4): <span class="nowrap">689–</span>707. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs11023-018-9482-5">10.1007/s11023-018-9482-5</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1572-8641">1572-8641</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6404626">6404626</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/30930541">30930541</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Minds+and+Machines&amp;rft.atitle=AI4People%E2%80%94An+Ethical+Framework+for+a+Good+AI+Society%3A+Opportunities%2C+Risks%2C+Principles%2C+and+Recommendations&amp;rft.volume=28&amp;rft.issue=4&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E689-%3C%2Fspan%3E707&amp;rft.date=2018-12-01&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6404626%23id-name%3DPMC&amp;rft.issn=1572-8641&amp;rft_id=info%3Apmid%2F30930541&amp;rft_id=info%3Adoi%2F10.1007%2Fs11023-018-9482-5&amp;rft.aulast=Floridi&amp;rft.aufirst=Luciano&amp;rft.au=Cowls%2C+Josh&amp;rft.au=Beltrametti%2C+Monica&amp;rft.au=Chatila%2C+Raja&amp;rft.au=Chazerand%2C+Patrice&amp;rft.au=Dignum%2C+Virginia&amp;rft.au=Luetge%2C+Christoph&amp;rft.au=Madelin%2C+Robert&amp;rft.au=Pagallo%2C+Ugo&amp;rft.au=Rossi%2C+Francesca&amp;rft.au=Schafer%2C+Burkhard&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6404626&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-165"><span class="mw-cite-backlink"><b><a href="#cite_ref-165">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation magazine cs1"><a rel="nofollow" class="external text" href="https://www.wired.com/author/joanna-j-bryson/">"Joanna J. Bryson"</a>. <i>WIRED</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230315194630/https://www.wired.com/author/joanna-j-bryson/">Archived</a> from the original on 15 March 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">13 January</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=WIRED&amp;rft.atitle=Joanna+J.+Bryson&amp;rft_id=https%3A%2F%2Fwww.wired.com%2Fauthor%2Fjoanna-j-bryson%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-166"><span class="mw-cite-backlink"><b><a href="#cite_ref-166">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://engineering.nyu.edu/news/new-artificial-intelligence-research-institute-launches">"New Artificial Intelligence Research Institute Launches"</a>. 2017-11-20. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200918091106/https://engineering.nyu.edu/news/new-artificial-intelligence-research-institute-launches">Archived</a> from the original on 2020-09-18<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2021-02-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=New+Artificial+Intelligence+Research+Institute+Launches&amp;rft.date=2017-11-20&amp;rft_id=https%3A%2F%2Fengineering.nyu.edu%2Fnews%2Fnew-artificial-intelligence-research-institute-launches&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-167"><span class="mw-cite-backlink"><b><a href="#cite_ref-167">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJames_J._HughesLaGrandeur,_Kevin2017" class="citation book cs1">James J. Hughes, LaGrandeur, Kevin, eds. (15 March 2017). <a rel="nofollow" class="external text" href="https://www.worldcat.org/oclc/976407024"><i>Surviving the machine age: intelligent technology and the transformation of human work</i></a>. Cham, Switzerland: Palgrave Macmillan Cham. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-3-319-51165-8" title="Special:BookSources/978-3-319-51165-8"><bdi>978-3-319-51165-8</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/976407024">976407024</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210318060659/https://www.worldcat.org/title/surviving-the-machine-age-intelligent-technology-and-the-transformation-of-human-work/oclc/976407024">Archived</a> from the original on 18 March 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">29 November</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Surviving+the+machine+age%3A+intelligent+technology+and+the+transformation+of+human+work&amp;rft.place=Cham%2C+Switzerland&amp;rft.pub=Palgrave+Macmillan+Cham&amp;rft.date=2017-03-15&amp;rft_id=info%3Aoclcnum%2F976407024&amp;rft.isbn=978-3-319-51165-8&amp;rft_id=https%3A%2F%2Fwww.worldcat.org%2Foclc%2F976407024&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-168"><span class="mw-cite-backlink"><b><a href="#cite_ref-168">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDanaher,_John2019" class="citation book cs1">Danaher, John (2019). <a rel="nofollow" class="external text" href="https://www.worldcat.org/oclc/1114334813"><i>Automation and utopia: human flourishing in a world without work</i></a>. Cambridge, Massachusetts: Harvard University Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-674-24220-3" title="Special:BookSources/978-0-674-24220-3"><bdi>978-0-674-24220-3</bdi></a>. 
<a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/1114334813">1114334813</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Automation+and+utopia%3A+human+flourishing+in+a+world+without+work&amp;rft.place=Cambridge%2C+Massachusetts&amp;rft.pub=Harvard+University+Press&amp;rft.date=2019&amp;rft_id=info%3Aoclcnum%2F1114334813&amp;rft.isbn=978-0-674-24220-3&amp;rft.au=Danaher%2C+John&amp;rft_id=https%3A%2F%2Fwww.worldcat.org%2Foclc%2F1114334813&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-169"><span class="mw-cite-backlink"><b><a href="#cite_ref-169">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.tum.de/nc/en/about-tum/news/press-releases/details/35727/">"TUM Institute for Ethics in Artificial Intelligence officially opened"</a>. <i>www.tum.de</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201210032545/https://www.tum.de/nc/en/about-tum/news/press-releases/details/35727/">Archived</a> from the original on 2020-12-10<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-11-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.tum.de&amp;rft.atitle=TUM+Institute+for+Ethics+in+Artificial+Intelligence+officially+opened&amp;rft_id=https%3A%2F%2Fwww.tum.de%2Fnc%2Fen%2Fabout-tum%2Fnews%2Fpress-releases%2Fdetails%2F35727%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-170"><span class="mw-cite-backlink"><b><a href="#cite_ref-170">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCommunications2019" class="citation web cs1">Communications PK (2019-01-25). <a rel="nofollow" class="external text" href="https://news.harvard.edu/gazette/story/2019/01/harvard-works-to-embed-ethics-in-computer-science-curriculum/">"Harvard works to embed ethics in computer science curriculum"</a>. <i>Harvard Gazette</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925020310/https://news.harvard.edu/gazette/story/2019/01/harvard-works-to-embed-ethics-in-computer-science-curriculum/">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-04-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Harvard+Gazette&amp;rft.atitle=Harvard+works+to+embed+ethics+in+computer+science+curriculum&amp;rft.date=2019-01-25&amp;rft.aulast=Communications&amp;rft.aufirst=Paul+Karoff+SEAS&amp;rft_id=https%3A%2F%2Fnews.harvard.edu%2Fgazette%2Fstory%2F2019%2F01%2Fharvard-works-to-embed-ethics-in-computer-science-curriculum%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-171"><span class="mw-cite-backlink"><b><a href="#cite_ref-171">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLee2020" class="citation news cs1">Lee J (2020-02-08). <a rel="nofollow" class="external text" href="https://www.npr.org/sections/codeswitch/2020/02/08/770174171/when-bias-is-coded-into-our-technology">"When Bias Is Coded Into Our Technology"</a>. <i>NPR</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2021-12-22</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=NPR&amp;rft.atitle=When+Bias+Is+Coded+Into+Our+Technology&amp;rft.date=2020-02-08&amp;rft.aulast=Lee&amp;rft.aufirst=Jennifer&amp;rft_id=https%3A%2F%2Fwww.npr.org%2Fsections%2Fcodeswitch%2F2020%2F02%2F08%2F770174171%2Fwhen-bias-is-coded-into-our-technology&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-172"><span class="mw-cite-backlink"><b><a href="#cite_ref-172">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation journal cs1"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-018-07718-x">"How one conference embraced diversity"</a>. <i>Nature</i>. <b>564</b> (7735): <span class="nowrap">161–</span>162. 2018-12-12. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-018-07718-x">10.1038/d41586-018-07718-x</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31123357">31123357</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:54481549">54481549</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=How+one+conference+embraced+diversity&amp;rft.volume=564&amp;rft.issue=7735&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E161-%3C%2Fspan%3E162&amp;rft.date=2018-12-12&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A54481549%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F31123357&amp;rft_id=info%3Adoi%2F10.1038%2Fd41586-018-07718-x&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252Fd41586-018-07718-x&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-173"><span class="mw-cite-backlink"><b><a href="#cite_ref-173">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRoose2020" class="citation news cs1">Roose K (2020-12-30). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2020/12/30/technology/2020-good-tech-awards.html">"The 2020 Good Tech Awards"</a>. <i>The New York Times</i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0362-4331">0362-4331</a><span class="reference-accessdate">. Retrieved <span class="nowrap">2021-12-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+New+York+Times&amp;rft.atitle=The+2020+Good+Tech+Awards&amp;rft.date=2020-12-30&amp;rft.issn=0362-4331&amp;rft.aulast=Roose&amp;rft.aufirst=Kevin&amp;rft_id=https%3A%2F%2Fwww.nytimes.com%2F2020%2F12%2F30%2Ftechnology%2F2020-good-tech-awards.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-174"><span class="mw-cite-backlink"><b><a href="#cite_ref-174">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLodge2014" class="citation journal cs1">Lodge P (2014). <a rel="nofollow" class="external text" href="https://doi.org/10.3998%2Fergo.12405314.0001.003">"Leibniz's Mill Argument Against Mechanical Materialism Revisited"</a>. <i>Ergo, an Open Access Journal of Philosophy</i>. <b>1</b> (20201214). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3998%2Fergo.12405314.0001.003">10.3998/ergo.12405314.0001.003</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<a rel="nofollow" class="external text" href="https://hdl.handle.net/2027%2Fspo.12405314.0001.003">2027/spo.12405314.0001.003</a>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2330-4014">2330-4014</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Ergo%2C+an+Open+Access+Journal+of+Philosophy&amp;rft.atitle=Leibniz%27s+Mill+Argument+Against+Mechanical+Materialism+Revisited&amp;rft.volume=1&amp;rft.issue=20201214&amp;rft.date=2014&amp;rft_id=info%3Ahdl%2F2027%2Fspo.12405314.0001.003&amp;rft.issn=2330-4014&amp;rft_id=info%3Adoi%2F10.3998%2Fergo.12405314.0001.003&amp;rft.aulast=Lodge&amp;rft.aufirst=Paul&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.3998%252Fergo.12405314.0001.003&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-175"><span class="mw-cite-backlink"><b><a href="#cite_ref-175">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBringsjordGovindarajulu2020" class="citation cs2">Bringsjord S, Govindarajulu NS (2020), <a rel="nofollow" class="external text" href="https://plato.stanford.edu/archives/sum2020/entries/artificial-intelligence/">"Artificial Intelligence"</a>, in Zalta EN, Nodelman U (eds.), <i>The Stanford Encyclopedia of Philosophy</i> (Summer 2020&#160;ed.), Metaphysics Research Lab, Stanford University, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220308015735/https://plato.stanford.edu/archives/sum2020/entries/artificial-intelligence/">archived</a> from the original on 2022-03-08<span class="reference-accessdate">, retrieved <span class="nowrap">2023-12-08</span></span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Artificial+Intelligence&amp;rft.btitle=The+Stanford+Encyclopedia+of+Philosophy&amp;rft.edition=Summer+2020&amp;rft.pub=Metaphysics+Research+Lab%2C+Stanford+University&amp;rft.date=2020&amp;rft.aulast=Bringsjord&amp;rft.aufirst=Selmer&amp;rft.au=Govindarajulu%2C+Naveen+Sundar&amp;rft_id=https%3A%2F%2Fplato.stanford.edu%2Farchives%2Fsum2020%2Fentries%2Fartificial-intelligence%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-176"><span class="mw-cite-backlink"><b><a href="#cite_ref-176">^</a></b></span> <span class="reference-text">Kulesz, O. (2018). "<a rel="nofollow" class="external text" href="https://unesdoc.unesco.org/ark:/48223/pf0000380584">Culture, Platforms and Machines</a>". UNESCO, Paris.</span> </li> <li id="cite_note-177"><span class="mw-cite-backlink"><b><a href="#cite_ref-177">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJr1999" class="citation book cs1">Jr HC (1999-04-29). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=FzwnridL72IC&amp;dq=digital+Much+of+his+work+was+then+spent+testing+the+boundaries+of+his+three+laws+to+see+where+they+would+break+down,+or+where+they+would+create+paradoxical+or+unanticipated+behavior.&amp;pg=PP13"><i>Information Technology and the Productivity Paradox: Assessing the Value of Investing in IT</i></a>. Oxford University Press. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-19-802838-3" title="Special:BookSources/978-0-19-802838-3"><bdi>978-0-19-802838-3</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240925020211/https://books.google.com/books?id=FzwnridL72IC&amp;dq=digital+Much+of+his+work+was+then+spent+testing+the+boundaries+of+his+three+laws+to+see+where+they+would+break+down,+or+where+they+would+create+paradoxical+or+unanticipated+behavior.&amp;pg=PP13#v=onepage&amp;q&amp;f=false">Archived</a> from the original on 2024-09-25<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-02-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Information+Technology+and+the+Productivity+Paradox%3A+Assessing+the+Value+of+Investing+in+IT&amp;rft.pub=Oxford+University+Press&amp;rft.date=1999-04-29&amp;rft.isbn=978-0-19-802838-3&amp;rft.aulast=Jr&amp;rft.aufirst=Henry+C.+Lucas&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DFzwnridL72IC%26dq%3Ddigital%2BMuch%2Bof%2Bhis%2Bwork%2Bwas%2Bthen%2Bspent%2Btesting%2Bthe%2Bboundaries%2Bof%2Bhis%2Bthree%2Blaws%2Bto%2Bsee%2Bwhere%2Bthey%2Bwould%2Bbreak%2Bdown%2C%2Bor%2Bwhere%2Bthey%2Bwould%2Bcreate%2Bparadoxical%2Bor%2Bunanticipated%2Bbehavior.%26pg%3DPP13&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-Asimov2008-178"><span class="mw-cite-backlink"><b><a href="#cite_ref-Asimov2008_178-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAsimov2008" class="citation book cs1">Asimov I (2008). <a href="/wiki/I,_Robot" title="I, Robot"><i>I, Robot</i></a>. New York: Bantam. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-553-38256-3" title="Special:BookSources/978-0-553-38256-3"><bdi>978-0-553-38256-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=I%2C+Robot&amp;rft.place=New+York&amp;rft.pub=Bantam&amp;rft.date=2008&amp;rft.isbn=978-0-553-38256-3&amp;rft.aulast=Asimov&amp;rft.aufirst=Isaac&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-lacuna-179"><span class="mw-cite-backlink"><b><a href="#cite_ref-lacuna_179-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="lacuna" class="citation journal cs1">Bryson J, Diamantis M, Grant T (September 2017). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10506-017-9214-9">"Of, for, and by the people: the legal lacuna of synthetic persons"</a>. <i>Artificial Intelligence and Law</i>. <b>25</b> (3): <span class="nowrap">273–</span>291. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10506-017-9214-9">10.1007/s10506-017-9214-9</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Artificial+Intelligence+and+Law&amp;rft.atitle=Of%2C+for%2C+and+by+the+people%3A+the+legal+lacuna+of+synthetic+persons&amp;rft.volume=25&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E273-%3C%2Fspan%3E291&amp;rft.date=2017-09&amp;rft_id=info%3Adoi%2F10.1007%2Fs10506-017-9214-9&amp;rft.aulast=Bryson&amp;rft.aufirst=Joanna&amp;rft.au=Diamantis%2C+Mihailis&amp;rft.au=Grant%2C+Thomas&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252Fs10506-017-9214-9&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-principles-180"><span class="mw-cite-backlink"><b><a href="#cite_ref-principles_180-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="principles" class="citation web cs1"><a rel="nofollow" class="external text" href="https://epsrc.ukri.org/research/ourportfolio/themes/engineering/activities/principlesofrobotics/">"Principles of robotics"</a>. UK's EPSRC. September 2010. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180401004346/https://www.epsrc.ac.uk/research/ourportfolio/themes/engineering/activities/principlesofrobotics/">Archived</a> from the original on 1 April 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">10 January</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Principles+of+robotics&amp;rft.pub=UK%27s+EPSRC&amp;rft.date=2010-09&amp;rft_id=https%3A%2F%2Fepsrc.ukri.org%2Fresearch%2Fourportfolio%2Fthemes%2Fengineering%2Factivities%2Fprinciplesofrobotics%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-181"><span class="mw-cite-backlink"><b><a href="#cite_ref-181">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYudkowsky2004" class="citation web cs1">Yudkowsky E (July 2004). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20120524150856/http://www.asimovlaws.com/articles/archives/2004/07/why_we_need_fri_1.html">"Why We Need Friendly AI"</a>. <i>3 laws unsafe</i>. 
Archived from <a rel="nofollow" class="external text" href="http://www.asimovlaws.com/articles/archives/2004/07/why_we_need_fri_1.html">the original</a> on May 24, 2012.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=3+laws+unsafe&amp;rft.atitle=Why+We+Need+Friendly+AI&amp;rft.date=2004-07&amp;rft.aulast=Yudkowsky&amp;rft.aufirst=Eliezer&amp;rft_id=http%3A%2F%2Fwww.asimovlaws.com%2Farticles%2Farchives%2F2004%2F07%2Fwhy_we_need_fri_1.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-182"><span class="mw-cite-backlink"><b><a href="#cite_ref-182">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAleksander2017" class="citation journal cs1">Aleksander I (March 2017). <a rel="nofollow" class="external text" href="http://journals.sagepub.com/doi/10.1057/s41265-016-0032-4">"Partners of Humans: A Realistic Assessment of the Role of Robots in the Foreseeable Future"</a>. <i>Journal of Information Technology</i>. <b>32</b> (1): <span class="nowrap">1–</span>9. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1057%2Fs41265-016-0032-4">10.1057/s41265-016-0032-4</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0268-3962">0268-3962</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:5288506">5288506</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240221065213/https://journals.sagepub.com/doi/10.1057/s41265-016-0032-4">Archived</a> from the original on 2024-02-21<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-02-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Information+Technology&amp;rft.atitle=Partners+of+Humans%3A+A+Realistic+Assessment+of+the+Role+of+Robots+in+the+Foreseeable+Future&amp;rft.volume=32&amp;rft.issue=1&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E1-%3C%2Fspan%3E9&amp;rft.date=2017-03&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A5288506%23id-name%3DS2CID&amp;rft.issn=0268-3962&amp;rft_id=info%3Adoi%2F10.1057%2Fs41265-016-0032-4&amp;rft.aulast=Aleksander&amp;rft.aufirst=Igor&amp;rft_id=http%3A%2F%2Fjournals.sagepub.com%2Fdoi%2F10.1057%2Fs41265-016-0032-4&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-183"><span class="mw-cite-backlink"><b><a href="#cite_ref-183">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.popsci.com/scitech/article/2009-08/evolving-robots-learn-lie-hide-resources-each-other">Evolving Robots Learn To Lie To Each Other</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20090828105728/http://www.popsci.com/scitech/article/2009-08/evolving-robots-learn-lie-hide-resources-each-other">Archived</a> 2009-08-28 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, Popular Science, August 18, 2009</span> </li> <li id="cite_note-Bassett-184"><span class="mw-cite-backlink"><b><a href="#cite_ref-Bassett_184-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBassettSteinmuellerVoss" class="citation web cs1">Bassett C, Steinmueller E, Voss G. <a rel="nofollow" class="external text" href="https://www.nesta.org.uk/report/better-made-up-the-mutual-influence-of-science-fiction-and-innovation/">"Better Made Up: The Mutual Influence of Science Fiction and Innovation"</a>. Nesta. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240503204507/https://www.nesta.org.uk/report/better-made-up-the-mutual-influence-of-science-fiction-and-innovation/">Archived</a> from the original on 3 May 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">3 May</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Better+Made+Up%3A+The+Mutual+Influence+of+Science+Fiction+and+Innovation&amp;rft.pub=Nesta&amp;rft.aulast=Bassett&amp;rft.aufirst=Caroline&amp;rft.au=Steinmueller%2C+Ed&amp;rft.au=Voss%2C+Georgina&amp;rft_id=https%3A%2F%2Fwww.nesta.org.uk%2Freport%2Fbetter-made-up-the-mutual-influence-of-science-fiction-and-innovation%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-185"><span class="mw-cite-backlink"><b><a href="#cite_ref-185">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVelasco2020" class="citation web cs1">Velasco G (2020-05-04). <a rel="nofollow" class="external text" href="https://revistaidees.cat/en/science-fiction-favors-engaging-debate-on-artificial-intelligence-and-ethics/">"Science-Fiction: A Mirror for the Future of Humankind"</a>. <i>IDEES</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20210422164230/https://revistaidees.cat/en/science-fiction-favors-engaging-debate-on-artificial-intelligence-and-ethics/">Archived</a> from the original on 2021-04-22<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-12-08</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=IDEES&amp;rft.atitle=Science-Fiction%3A+A+Mirror+for+the+Future+of+Humankind&amp;rft.date=2020-05-04&amp;rft.aulast=Velasco&amp;rft.aufirst=Guille&amp;rft_id=https%3A%2F%2Frevistaidees.cat%2Fen%2Fscience-fiction-favors-engaging-debate-on-artificial-intelligence-and-ethics%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-186"><span class="mw-cite-backlink"><b><a href="#cite_ref-186">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://readysteadycut.com/2021/05/14/recap-love-death-and-robots-season-2-episode-1-automated-customer-service-netflix-series/">"Love, Death &amp; Robots season 2, episode 1 recap - "Automated Customer Service"<span class="cs1-kern-right"></span>"</a>. <i>Ready Steady Cut</i>. 2021-05-14. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211221035251/https://readysteadycut.com/2021/05/14/recap-love-death-and-robots-season-2-episode-1-automated-customer-service-netflix-series/">Archived</a> from the original on 2021-12-21<span class="reference-accessdate">. Retrieved <span class="nowrap">2021-12-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Ready+Steady+Cut&amp;rft.atitle=Love%2C+Death+%26+Robots+season+2%2C+episode+1+recap+-+%22Automated+Customer+Service%22&amp;rft.date=2021-05-14&amp;rft_id=https%3A%2F%2Freadysteadycut.com%2F2021%2F05%2F14%2Frecap-love-death-and-robots-season-2-episode-1-automated-customer-service-netflix-series%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-187"><span class="mw-cite-backlink"><b><a href="#cite_ref-187">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCave,_StephenDihal,_KantaDillon,_Sarah2020" class="citation book cs1">Cave, Stephen, Dihal, Kanta, Dillon, Sarah, eds. (14 February 2020). <a rel="nofollow" class="external text" href="https://www.worldcat.org/oclc/1143647559"><i>AI narratives: a history of imaginative thinking about intelligent machines</i></a> (First&#160;ed.). Oxford: Oxford University Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-19-258604-9" title="Special:BookSources/978-0-19-258604-9"><bdi>978-0-19-258604-9</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/1143647559">1143647559</a>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20210318060703/https://www.worldcat.org/title/ai-narratives-a-history-of-imaginative-thinking-about-intelligent-machines/oclc/1143647559">Archived</a> from the original on 18 March 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">11 November</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=AI+narratives%3A+a+history+of+imaginative+thinking+about+intelligent+machines&amp;rft.place=Oxford&amp;rft.edition=First&amp;rft.pub=Oxford+University+Press&amp;rft.date=2020-02-14&amp;rft_id=info%3Aoclcnum%2F1143647559&amp;rft.isbn=978-0-19-258604-9&amp;rft_id=https%3A%2F%2Fwww.worldcat.org%2Foclc%2F1143647559&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-188"><span class="mw-cite-backlink"><b><a href="#cite_ref-188">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJerreat-Poole2020" class="citation journal cs1">Jerreat-Poole A (1 February 2020). <a rel="nofollow" class="external text" href="http://gamestudies.org/2001/articles/jerreatpoole">"Sick, Slow, Cyborg: Crip Futurity in Mass Effect"</a>. <i>Game Studies</i>. <b>20</b>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1604-7982">1604-7982</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201209080256/http://gamestudies.org/2001/articles/jerreatpoole">Archived</a> from the original on 9 December 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">11 November</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Game+Studies&amp;rft.atitle=Sick%2C+Slow%2C+Cyborg%3A+Crip+Futurity+in+Mass+Effect&amp;rft.volume=20&amp;rft.date=2020-02-01&amp;rft.issn=1604-7982&amp;rft.aulast=Jerreat-Poole&amp;rft.aufirst=Adam&amp;rft_id=http%3A%2F%2Fgamestudies.org%2F2001%2Farticles%2Fjerreatpoole&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-189"><span class="mw-cite-backlink"><b><a href="#cite_ref-189">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://coffeeordie.com/detroit-become-human-will-challenge-your-morals-and-your-humanity/">"<span class="cs1-kern-left"></span>"Detroit: Become Human" Will Challenge your Morals and your Humanity"</a>. <i>Coffee or Die Magazine</i>. 2018-08-06. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211209195312/https://coffeeordie.com/detroit-become-human-will-challenge-your-morals-and-your-humanity/">Archived</a> from the original on 2021-12-09<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2021-12-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Coffee+or+Die+Magazine&amp;rft.atitle=%22Detroit%3A+Become+Human%22+Will+Challenge+your+Morals+and+your+Humanity&amp;rft.date=2018-08-06&amp;rft_id=https%3A%2F%2Fcoffeeordie.com%2Fdetroit-become-human-will-challenge-your-morals-and-your-humanity%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-190"><span class="mw-cite-backlink"><b><a href="#cite_ref-190">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCerquiWarwick2008" class="citation cs2">Cerqui D, Warwick K (2008), <a rel="nofollow" class="external text" href="http://link.springer.com/10.1007/978-1-4020-6591-0_14">"Re-Designing Humankind: The Rise of Cyborgs, a Desirable Goal?"</a>, <i>Philosophy and Design</i>, Dordrecht: Springer Netherlands, pp.&#160;<span class="nowrap">185–</span>195, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-1-4020-6591-0_14">10.1007/978-1-4020-6591-0_14</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-1-4020-6590-3" title="Special:BookSources/978-1-4020-6590-3"><bdi>978-1-4020-6590-3</bdi></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210318060701/https://link.springer.com/chapter/10.1007%2F978-1-4020-6591-0_14">archived</a> from the original on 2021-03-18<span class="reference-accessdate">, retrieved <span class="nowrap">2020-11-11</span></span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Philosophy+and+Design&amp;rft.atitle=Re-Designing+Humankind%3A+The+Rise+of+Cyborgs%2C+a+Desirable+Goal%3F&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E185-%3C%2Fspan%3E195&amp;rft.date=2008&amp;rft_id=info%3Adoi%2F10.1007%2F978-1-4020-6591-0_14&amp;rft.isbn=978-1-4020-6590-3&amp;rft.aulast=Cerqui&amp;rft.aufirst=Daniela&amp;rft.au=Warwick%2C+Kevin&amp;rft_id=http%3A%2F%2Flink.springer.com%2F10.1007%2F978-1-4020-6591-0_14&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> <li id="cite_note-191"><span class="mw-cite-backlink"><b><a href="#cite_ref-191">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCaveDihal2020" class="citation journal cs1">Cave S, Dihal K (6 August 2020). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs13347-020-00415-6">"The Whiteness of AI"</a>. <i>Philosophy &amp; Technology</i>. <b>33</b> (4): <span class="nowrap">685–</span>703. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs13347-020-00415-6">10.1007/s13347-020-00415-6</a></span>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:225466550">225466550</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Philosophy+%26+Technology&amp;rft.atitle=The+Whiteness+of+AI&amp;rft.volume=33&amp;rft.issue=4&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E685-%3C%2Fspan%3E703&amp;rft.date=2020-08-06&amp;rft_id=info%3Adoi%2F10.1007%2Fs13347-020-00415-6&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A225466550%23id-name%3DS2CID&amp;rft.aulast=Cave&amp;rft.aufirst=Stephen&amp;rft.au=Dihal%2C+Kanta&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252Fs13347-020-00415-6&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></span> </li> </ol></div></div> <div class="mw-heading mw-heading2"><h2 id="External_links">External links</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Ethics_of_artificial_intelligence&amp;action=edit&amp;section=35" title="Edit section: External links"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a rel="nofollow" class="external text" href="http://www.iep.utm.edu/ethic-ai/">Ethics of Artificial Intelligence</a> at the <i><a href="/wiki/Internet_Encyclopedia_of_Philosophy" title="Internet Encyclopedia of Philosophy">Internet Encyclopedia of Philosophy</a></i></li> <li><a rel="nofollow" class="external text" href="https://plato.stanford.edu/entries/ethics-ai/">Ethics of Artificial Intelligence and Robotics</a> at the <a href="/wiki/Stanford_Encyclopedia_of_Philosophy" title="Stanford Encyclopedia of Philosophy">Stanford Encyclopedia of Philosophy</a></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussellHauertAltmanVeloso2015" class="citation journal cs1">Russell S, Hauert S, Altman R, Veloso M (May 2015). <a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F521415a">"Robotics: Ethics of artificial intelligence"</a>. <i>Nature</i>. <b>521</b> (7553): <span class="nowrap">415–</span>418. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2015Natur.521..415.">2015Natur.521..415.</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F521415a">10.1038/521415a</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26017428">26017428</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:4452826">4452826</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=Robotics%3A+Ethics+of+artificial+intelligence&amp;rft.volume=521&amp;rft.issue=7553&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E415-%3C%2Fspan%3E418&amp;rft.date=2015-05&amp;rft_id=info%3Adoi%2F10.1038%2F521415a&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A4452826%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F26017428&amp;rft_id=info%3Abibcode%2F2015Natur.521..415.&amp;rft.aulast=Russell&amp;rft.aufirst=S.&amp;rft.au=Hauert%2C+S.&amp;rft.au=Altman%2C+R.&amp;rft.au=Veloso%2C+M.&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252F521415a&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></li> <li><a rel="nofollow" class="external text" href="http://news.bbc.co.uk/1/hi/sci/tech/1809769.stm">BBC News: Games to take on a life of their own</a></li> <li><a rel="nofollow" class="external text" href="http://www.dasboot.org/thorisson.htm">Who's Afraid of Robots?</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180322214031/http://www.dasboot.org/thorisson.htm">Archived</a> 2018-03-22 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, an article on humanity's fear of artificial intelligence.</li> <li><a rel="nofollow" class="external text" href="https://web.archive.org/web/20080418122849/http://www.southernct.edu/organizations/rccs/resources/research/introduction/bynum_shrt_hist.html">A short history of computer ethics</a></li> <li><a rel="nofollow" class="external text" href="https://algorithmwatch.org/en/project/ai-ethics-guidelines-global-inventory/">AI Ethics Guidelines Global Inventory</a> by <a rel="nofollow" class="external text" href="https://algorithmwatch.org">Algorithmwatch</a></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHagendorff2020" class="citation journal cs1">Hagendorff T (March 2020). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs11023-020-09517-8">"The Ethics of AI Ethics: An Evaluation of Guidelines"</a>. <i>Minds and Machines</i>. <b>30</b> (1): <span class="nowrap">99–</span>120. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1903.03425">1903.03425</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs11023-020-09517-8">10.1007/s11023-020-09517-8</a></span>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:72940833">72940833</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Minds+and+Machines&amp;rft.atitle=The+Ethics+of+AI+Ethics%3A+An+Evaluation+of+Guidelines&amp;rft.volume=30&amp;rft.issue=1&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E99-%3C%2Fspan%3E120&amp;rft.date=2020-03&amp;rft_id=info%3Aarxiv%2F1903.03425&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A72940833%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1007%2Fs11023-020-09517-8&amp;rft.aulast=Hagendorff&amp;rft.aufirst=Thilo&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252Fs11023-020-09517-8&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></li> <li>Sheludko, M. (December, 2023). <a rel="nofollow" class="external text" href="https://lasoft.org/blog/ethical-aspects-of-artificial-intelligence-challenges-and-imperatives/">Ethical Aspects of Artificial Intelligence: Challenges and Imperatives</a>. Software Development Blog.</li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEisikovits" class="citation web cs1">Eisikovits N. <a rel="nofollow" class="external text" href="https://www.scientificamerican.com/article/ai-is-an-existential-threat-just-not-the-way-you-think/">"AI Is an Existential Threat—Just Not the Way You Think"</a>. <i>Scientific American</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2024-03-04</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Scientific+American&amp;rft.atitle=AI+Is+an+Existential+Threat%E2%80%94Just+Not+the+Way+You+Think&amp;rft.aulast=Eisikovits&amp;rft.aufirst=Nir&amp;rft_id=https%3A%2F%2Fwww.scientificamerican.com%2Farticle%2Fai-is-an-existential-threat-just-not-the-way-you-think%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AEthics+of+artificial+intelligence" class="Z3988"></span></li></ul> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output 
standard</a></li> <li><a href="/wiki/Duty" title="Duty">Duty</a></li> <li><a href="/wiki/Egalitarianism" title="Egalitarianism">Equality</a></li> <li><a href="/wiki/Etiquette" title="Etiquette">Etiquette</a></li> <li><a href="/wiki/Eudaimonia" title="Eudaimonia">Eudaimonia</a></li> <li><a href="/wiki/Family_values" title="Family values">Family values</a></li> <li><a href="/wiki/Fidelity" title="Fidelity">Fidelity</a></li> <li><a href="/wiki/Free_will" title="Free will">Free will</a></li> <li><a href="/wiki/Good_and_evil" title="Good and evil">Good and evil</a> <ul><li><a href="/wiki/Good" title="Good">Good</a></li> <li><a href="/wiki/Evil" title="Evil">Evil</a></li> <li><a href="/wiki/Problem_of_evil" title="Problem of evil">Problem of evil</a></li></ul></li> <li><a href="/wiki/Greed" title="Greed">Greed</a></li> <li><a href="/wiki/Happiness" title="Happiness">Happiness</a></li> <li><a href="/wiki/Honour" title="Honour">Honour</a></li> <li><a href="/wiki/Ideal_(ethics)" title="Ideal (ethics)">Ideal</a></li> <li><a href="/wiki/Immorality" title="Immorality">Immorality</a></li> <li><a href="/wiki/Justice" title="Justice">Justice</a></li> <li><a href="/wiki/Liberty" title="Liberty">Liberty</a></li> <li><a href="/wiki/Loyalty" title="Loyalty">Loyalty</a></li> <li><a href="/wiki/Moral_agency" title="Moral agency">Moral agency</a></li> <li><a href="/wiki/Moral_courage" title="Moral courage">Moral courage</a></li> <li><a href="/wiki/Moral_hierarchy" title="Moral hierarchy">Moral hierarchy</a></li> <li><a href="/wiki/Moral_imperative" title="Moral imperative">Moral imperative</a></li> <li><a href="/wiki/Morality" title="Morality">Morality</a></li> <li><a href="/wiki/Norm_(philosophy)" title="Norm (philosophy)">Norm</a></li> <li><a href="/wiki/Pacifism" title="Pacifism">Pacifism</a></li> <li><a href="/wiki/Political_freedom" title="Political freedom">Political freedom</a></li> <li><a href="/wiki/Precept" title="Precept">Precept</a></li> <li><a href="/wiki/Rights" title="Rights">Rights</a></li> <li><a href="/wiki/Self-discipline" class="mw-redirect" title="Self-discipline">Self-discipline</a></li> <li><a href="/wiki/Suffering" title="Suffering">Suffering</a></li> <li><a href="/wiki/Stewardship" title="Stewardship">Stewardship</a></li> <li><a href="/wiki/Sympathy" title="Sympathy">Sympathy</a></li> <li><a href="/wiki/Theodicy" title="Theodicy">Theodicy</a></li> <li><a href="/wiki/Torture" title="Torture">Torture</a></li> <li><a href="/wiki/Trust_(social_science)" title="Trust (social science)">Trust</a></li> <li><a href="/wiki/Value_(ethics)" title="Value (ethics)">Value</a> <ul><li><a href="/wiki/Intrinsic_value_(ethics)" title="Intrinsic value (ethics)">Intrinsic</a></li> <li><a href="/wiki/Japanese_values" title="Japanese values">Japan</a></li> <li><a href="/wiki/Values_(Western_philosophy)" class="mw-redirect" title="Values (Western philosophy)">Western</a></li></ul></li> <li><a href="/wiki/Vice" title="Vice">Vice</a></li> <li><a href="/wiki/Virtue" title="Virtue">Virtue</a></li> <li><a href="/wiki/Vow" title="Vow">Vow</a></li> <li><a href="/wiki/Wrongdoing" title="Wrongdoing">Wrong</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/List_of_ethicists" title="List of ethicists">Ethicists<br /></a></th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Laozi" title="Laozi">Laozi</a></li> <li><a href="/wiki/Socrates" title="Socrates">Socrates</a></li> <li><a 
href="/wiki/Plato" title="Plato">Plato</a></li> <li><a href="/wiki/Aristotle" title="Aristotle">Aristotle</a></li> <li><a href="/wiki/Diogenes" title="Diogenes">Diogenes</a></li> <li><a href="/wiki/Thiruvalluvar" title="Thiruvalluvar">Valluvar</a></li> <li><a href="/wiki/Cicero" title="Cicero">Cicero</a></li> <li><a href="/wiki/Confucius" title="Confucius">Confucius</a></li> <li><a href="/wiki/Augustine_of_Hippo" title="Augustine of Hippo">Augustine</a></li> <li><a href="/wiki/Mencius" title="Mencius">Mencius</a></li> <li><a href="/wiki/Mozi" title="Mozi">Mozi</a></li> <li><a href="/wiki/Xunzi_(philosopher)" title="Xunzi (philosopher)">Xunzi</a></li> <li><a href="/wiki/Thomas_Aquinas" title="Thomas Aquinas">Aquinas</a></li> <li><a href="/wiki/Baruch_Spinoza" title="Baruch Spinoza">Spinoza</a></li> <li><a href="/wiki/Joseph_Butler" title="Joseph Butler">Butler</a></li> <li><a href="/wiki/David_Hume" title="David Hume">Hume</a></li> <li><a href="/wiki/Adam_Smith" title="Adam Smith">Smith</a></li> <li><a href="/wiki/Immanuel_Kant" title="Immanuel Kant">Kant</a></li> <li><a href="/wiki/Georg_Wilhelm_Friedrich_Hegel" title="Georg Wilhelm Friedrich Hegel">Hegel</a></li> <li><a href="/wiki/Arthur_Schopenhauer" title="Arthur Schopenhauer">Schopenhauer</a></li> <li><a href="/wiki/Jeremy_Bentham" title="Jeremy Bentham">Bentham</a></li> <li><a href="/wiki/John_Stuart_Mill" title="John Stuart Mill">Mill</a></li> <li><a href="/wiki/S%C3%B8ren_Kierkegaard" title="Søren Kierkegaard">Kierkegaard</a></li> <li><a href="/wiki/Henry_Sidgwick" title="Henry Sidgwick">Sidgwick</a></li> <li><a href="/wiki/Friedrich_Nietzsche" title="Friedrich Nietzsche">Nietzsche</a></li> <li><a href="/wiki/G._E._Moore" title="G. E. Moore">Moore</a></li> <li><a href="/wiki/Karl_Barth" title="Karl Barth">Barth</a></li> <li><a href="/wiki/Paul_Tillich" title="Paul Tillich">Tillich</a></li> <li><a href="/wiki/Dietrich_Bonhoeffer" title="Dietrich Bonhoeffer">Bonhoeffer</a></li> <li><a href="/wiki/Philippa_Foot" title="Philippa Foot">Foot</a></li> <li><a href="/wiki/John_Rawls" title="John Rawls">Rawls</a></li> <li><a href="/wiki/John_Dewey" title="John Dewey">Dewey</a></li> <li><a href="/wiki/Bernard_Williams" title="Bernard Williams">Williams</a></li> <li><a href="/wiki/J._L._Mackie" title="J. L. Mackie">Mackie</a></li> <li><a href="/wiki/G._E._M._Anscombe" title="G. E. M. Anscombe">Anscombe</a></li> <li><a href="/wiki/William_Frankena" title="William Frankena">Frankena</a></li> <li><a href="/wiki/Alasdair_MacIntyre" title="Alasdair MacIntyre">MacIntyre</a></li> <li><a href="/wiki/R._M._Hare" title="R. M. 
Hare">Hare</a></li> <li><a href="/wiki/Peter_Singer" title="Peter Singer">Singer</a></li> <li><a href="/wiki/Derek_Parfit" title="Derek Parfit">Parfit</a></li> <li><a href="/wiki/Thomas_Nagel" title="Thomas Nagel">Nagel</a></li> <li><a href="/wiki/Robert_Merrihew_Adams" title="Robert Merrihew Adams">Adams</a></li> <li><a href="/wiki/Charles_Taylor_(philosopher)" title="Charles Taylor (philosopher)">Taylor</a></li> <li><a href="/wiki/Joxe_Azurmendi" title="Joxe Azurmendi">Azurmendi</a></li> <li><a href="/wiki/Christine_Korsgaard" title="Christine Korsgaard">Korsgaard</a></li> <li><a href="/wiki/Martha_Nussbaum" title="Martha Nussbaum">Nussbaum</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Works</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0;font-style:italic;"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Nicomachean_Ethics" title="Nicomachean Ethics">Nicomachean Ethics</a> <span style="font-size:85%;">(c. 322 BC)</span></li> <li><a href="/wiki/Ethics_(Spinoza_book)" class="mw-redirect" title="Ethics (Spinoza book)">Ethics (Spinoza)</a> <span style="font-size:85%;">(1677)</span></li> <li><a href="/wiki/Fifteen_Sermons_Preached_at_the_Rolls_Chapel" title="Fifteen Sermons Preached at the Rolls Chapel">Fifteen Sermons Preached at the Rolls Chapel</a> <span style="font-size:85%;">(1726)</span></li> <li><a href="/wiki/A_Treatise_of_Human_Nature" title="A Treatise of Human Nature">A Treatise of Human Nature</a> <span style="font-size:85%;">(1740)</span></li> <li><a href="/wiki/The_Theory_of_Moral_Sentiments" title="The Theory of Moral Sentiments">The Theory of Moral Sentiments</a> <span style="font-size:85%;">(1759)</span></li> <li><a href="/wiki/An_Introduction_to_the_Principles_of_Morals_and_Legislation" title="An Introduction to the Principles of Morals and Legislation">An Introduction to the Principles of Morals and Legislation</a> <span style="font-size:85%;">(1780)</span></li> <li><a href="/wiki/Groundwork_of_the_Metaphysics_of_Morals" title="Groundwork of the Metaphysics of Morals">Groundwork of the Metaphysics of Morals</a> <span style="font-size:85%;">(1785)</span></li> <li><a href="/wiki/Critique_of_Practical_Reason" title="Critique of Practical Reason">Critique of Practical Reason</a> <span style="font-size:85%;">(1788)</span></li> <li><a href="/wiki/Elements_of_the_Philosophy_of_Right" title="Elements of the Philosophy of Right">Elements of the Philosophy of Right</a> <span style="font-size:85%;">(1820)</span></li> <li><a href="/wiki/Either/Or_(Kierkegaard_book)" title="Either/Or (Kierkegaard book)">Either/Or</a> <span style="font-size:85%;">(1843)</span></li> <li><a href="/wiki/Utilitarianism_(book)" title="Utilitarianism (book)">Utilitarianism</a> <span style="font-size:85%;">(1861)</span></li> <li><a href="/wiki/The_Methods_of_Ethics" title="The Methods of Ethics">The Methods of Ethics</a> <span style="font-size:85%;">(1874)</span></li> <li><a href="/wiki/On_the_Genealogy_of_Morality" title="On the Genealogy of Morality">On the Genealogy of Morality</a> <span style="font-size:85%;">(1887)</span></li> <li><a href="/wiki/Principia_Ethica" title="Principia Ethica">Principia Ethica</a> <span style="font-size:85%;">(1903)</span></li> <li><a href="/wiki/A_Theory_of_Justice" title="A Theory of Justice">A Theory of Justice</a> <span style="font-size:85%;">(1971)</span></li> <li><a href="/wiki/Practical_Ethics" title="Practical Ethics">Practical Ethics</a> <span 
style="font-size:85%;">(1979)</span></li> <li><a href="/wiki/After_Virtue" title="After Virtue">After Virtue</a> <span style="font-size:85%;">(1981)</span></li> <li><a href="/wiki/Reasons_and_Persons" title="Reasons and Persons">Reasons and Persons</a> <span style="font-size:85%;">(1984)</span></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Related</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Axiology" class="mw-redirect" title="Axiology">Axiology</a></li> <li><a href="/wiki/Casuistry" title="Casuistry">Casuistry</a></li> <li><a href="/wiki/Descriptive_ethics" title="Descriptive ethics">Descriptive ethics</a></li> <li><a href="/wiki/Ethics_in_religion" title="Ethics in religion">Ethics in religion</a></li> <li><a href="/wiki/Evolutionary_ethics" title="Evolutionary ethics">Evolutionary ethics</a></li> <li><a href="/wiki/History_of_ethics" title="History of ethics">History of ethics</a></li> <li><a href="/wiki/Human_rights" title="Human rights">Human rights</a></li> <li><a href="/wiki/Ideology" title="Ideology">Ideology</a></li> <li><a href="/wiki/Moral_psychology" title="Moral psychology">Moral psychology</a></li> <li><a href="/wiki/Philosophy_of_law" class="mw-redirect" title="Philosophy of law">Philosophy of law</a></li> <li><a href="/wiki/Political_philosophy" title="Political philosophy">Political philosophy</a></li> <li><a href="/wiki/Population_ethics" title="Population ethics">Population ethics</a></li> <li><a href="/wiki/Rehabilitation_(penology)" title="Rehabilitation (penology)">Rehabilitation</a></li> <li><a href="/wiki/Secular_ethics" title="Secular ethics">Secular ethics</a></li> <li><a href="/wiki/Social_philosophy" title="Social philosophy">Social philosophy</a></li> <li><b><a href="/wiki/Index_of_ethics_articles" title="Index of ethics articles">Index</a></b></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> <a href="/wiki/Category:Ethics" title="Category:Ethics">Category</a></li></ul> </div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"></div><div role="navigation" class="navbox" aria-labelledby="Existential_risk_from_artificial_intelligence285" style="padding:3px"><table class="nowraplinks mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Existential_risk_from_artificial_intelligence" title="Template:Existential risk 
from artificial intelligence"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Existential_risk_from_artificial_intelligence" title="Template talk:Existential risk from artificial intelligence"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Existential_risk_from_artificial_intelligence" title="Special:EditPage/Template:Existential risk from artificial intelligence"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Existential_risk_from_artificial_intelligence285" style="font-size:114%;margin:0 4em"><a href="/wiki/Existential_risk_from_artificial_intelligence" title="Existential risk from artificial intelligence">Existential risk</a> from <a href="/wiki/Artificial_intelligence" title="Artificial intelligence">artificial intelligence</a></div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%">Concepts</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Artificial_general_intelligence" title="Artificial general intelligence">AGI</a></li> <li><a href="/wiki/AI_alignment" title="AI alignment">AI alignment</a></li> <li><a href="/wiki/AI_capability_control" title="AI capability control">AI capability control</a></li> <li><a href="/wiki/AI_safety" title="AI safety">AI safety</a></li> <li><a href="/wiki/AI_takeover" title="AI takeover">AI takeover</a></li> <li><a href="/wiki/Consequentialism" title="Consequentialism">Consequentialism</a></li> <li><a href="/wiki/Effective_accelerationism" title="Effective accelerationism">Effective accelerationism</a></li> <li><a class="mw-selflink selflink">Ethics of artificial intelligence</a></li> <li><a href="/wiki/Existential_risk_from_artificial_intelligence" title="Existential risk from artificial intelligence">Existential risk from artificial intelligence</a></li> <li><a href="/wiki/Friendly_artificial_intelligence" title="Friendly artificial intelligence">Friendly artificial intelligence</a></li> <li><a href="/wiki/Instrumental_convergence" title="Instrumental convergence">Instrumental convergence</a></li> <li><a href="/wiki/Intelligence_explosion" class="mw-redirect" title="Intelligence explosion">Intelligence explosion</a></li> <li><a href="/wiki/Longtermism" title="Longtermism">Longtermism</a></li> <li><a href="/wiki/Machine_ethics" title="Machine ethics">Machine ethics</a></li> <li><a href="/wiki/Risk_of_astronomical_suffering" title="Risk of astronomical suffering">Suffering risks</a></li> <li><a href="/wiki/Superintelligence" title="Superintelligence">Superintelligence</a></li> <li><a href="/wiki/Technological_singularity" title="Technological singularity">Technological singularity</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Organizations</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Alignment_Research_Center" title="Alignment Research Center">Alignment Research Center</a></li> <li><a href="/wiki/Center_for_AI_Safety" title="Center for AI Safety">Center for AI Safety</a></li> <li><a href="/wiki/Center_for_Applied_Rationality" title="Center for Applied Rationality">Center for Applied Rationality</a></li> <li><a href="/wiki/Center_for_Human-Compatible_Artificial_Intelligence" title="Center for Human-Compatible Artificial Intelligence">Center for 
Human-Compatible Artificial Intelligence</a></li> <li><a href="/wiki/Centre_for_the_Study_of_Existential_Risk" title="Centre for the Study of Existential Risk">Centre for the Study of Existential Risk</a></li> <li><a href="/wiki/EleutherAI" title="EleutherAI">EleutherAI</a></li> <li><a href="/wiki/Future_of_Humanity_Institute" title="Future of Humanity Institute">Future of Humanity Institute</a></li> <li><a href="/wiki/Future_of_Life_Institute" title="Future of Life Institute">Future of Life Institute</a></li> <li><a href="/wiki/Google_DeepMind" title="Google DeepMind">Google DeepMind</a></li> <li><a href="/wiki/Humanity%2B" title="Humanity+">Humanity+</a></li> <li><a href="/wiki/Institute_for_Ethics_and_Emerging_Technologies" title="Institute for Ethics and Emerging Technologies">Institute for Ethics and Emerging Technologies</a></li> <li><a href="/wiki/Leverhulme_Centre_for_the_Future_of_Intelligence" title="Leverhulme Centre for the Future of Intelligence">Leverhulme Centre for the Future of Intelligence</a></li> <li><a href="/wiki/Machine_Intelligence_Research_Institute" title="Machine Intelligence Research Institute">Machine Intelligence Research Institute</a></li> <li><a href="/wiki/OpenAI" title="OpenAI">OpenAI</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">People</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Slate_Star_Codex" title="Slate Star Codex">Scott Alexander</a></li> <li><a href="/wiki/Sam_Altman" title="Sam Altman">Sam Altman</a></li> <li><a href="/wiki/Yoshua_Bengio" title="Yoshua Bengio">Yoshua Bengio</a></li> <li><a href="/wiki/Nick_Bostrom" title="Nick Bostrom">Nick Bostrom</a></li> <li><a href="/wiki/Paul_Christiano_(researcher)" title="Paul Christiano (researcher)">Paul Christiano</a></li> <li><a href="/wiki/K._Eric_Drexler" title="K. Eric Drexler">Eric Drexler</a></li> <li><a href="/wiki/Sam_Harris" title="Sam Harris">Sam Harris</a></li> <li><a href="/wiki/Stephen_Hawking" title="Stephen Hawking">Stephen Hawking</a></li> <li><a href="/wiki/Dan_Hendrycks" title="Dan Hendrycks">Dan Hendrycks</a></li> <li><a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a></li> <li><a href="/wiki/Bill_Joy" title="Bill Joy">Bill Joy</a></li> <li><a href="/wiki/Shane_Legg" title="Shane Legg">Shane Legg</a></li> <li><a href="/wiki/Elon_Musk" title="Elon Musk">Elon Musk</a></li> <li><a href="/wiki/Steve_Omohundro" title="Steve Omohundro">Steve Omohundro</a></li> <li><a href="/wiki/Huw_Price" title="Huw Price">Huw Price</a></li> <li><a href="/wiki/Martin_Rees" title="Martin Rees">Martin Rees</a></li> <li><a href="/wiki/Stuart_J._Russell" title="Stuart J. Russell">Stuart J. 
Russell</a></li> <li><a href="/wiki/Jaan_Tallinn" title="Jaan Tallinn">Jaan Tallinn</a></li> <li><a href="/wiki/Max_Tegmark" title="Max Tegmark">Max Tegmark</a></li> <li><a href="/wiki/Frank_Wilczek" title="Frank Wilczek">Frank Wilczek</a></li> <li><a href="/wiki/Roman_Yampolskiy" title="Roman Yampolskiy">Roman Yampolskiy</a></li> <li><a href="/wiki/Eliezer_Yudkowsky" title="Eliezer Yudkowsky">Eliezer Yudkowsky</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Other</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Statement_on_AI_risk_of_extinction" title="Statement on AI risk of extinction">Statement on AI risk of extinction</a></li> <li><i><a href="/wiki/Human_Compatible" title="Human Compatible">Human Compatible</a></i></li> <li><a href="/wiki/Open_letter_on_artificial_intelligence_(2015)" title="Open letter on artificial intelligence (2015)">Open letter on artificial intelligence (2015)</a></li> <li><i><a href="/wiki/Our_Final_Invention" title="Our Final Invention">Our Final Invention</a></i></li> <li><i><a href="/wiki/The_Precipice:_Existential_Risk_and_the_Future_of_Humanity" title="The Precipice: Existential Risk and the Future of Humanity">The Precipice</a></i></li> <li><i><a href="/wiki/Superintelligence:_Paths,_Dangers,_Strategies" title="Superintelligence: Paths, Dangers, Strategies">Superintelligence: Paths, Dangers, Strategies</a></i></li> <li><i><a href="/wiki/Do_You_Trust_This_Computer%3F" title="Do You Trust This Computer?">Do You Trust This Computer?</a></i></li> <li><a href="/wiki/Artificial_Intelligence_Act" title="Artificial Intelligence Act">Artificial Intelligence Act</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> <a href="/wiki/Category:Existential_risk_from_artificial_general_intelligence" title="Category:Existential risk from artificial general intelligence">Category</a></div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"></div><div role="navigation" class="navbox" aria-labelledby="Philosophy_of_science439" style="padding:3px"><table class="nowraplinks hlist mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Philosophy_of_science" title="Template:Philosophy of science"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Philosophy_of_science" title="Template 
talk:Philosophy of science"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Philosophy_of_science" title="Special:EditPage/Template:Philosophy of science"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Philosophy_of_science439" style="font-size:114%;margin:0 4em"><a href="/wiki/Philosophy_of_science" title="Philosophy of science">Philosophy of science</a></div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%">Concepts</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Philosophical_analysis" title="Philosophical analysis">Analysis</a></li> <li><a href="/wiki/Analytic%E2%80%93synthetic_distinction" title="Analytic–synthetic distinction">Analytic–synthetic distinction</a></li> <li><a href="/wiki/A_priori_and_a_posteriori" title="A priori and a posteriori"><i>A priori</i> and <i>a posteriori</i></a></li> <li><a href="/wiki/Causality" title="Causality">Causality</a> <ul><li><a href="/wiki/Mill%27s_Methods" class="mw-redirect" title="Mill&#39;s Methods">Mill's Methods</a></li></ul></li> <li><a href="/wiki/Commensurability_(philosophy_of_science)" title="Commensurability (philosophy of science)">Commensurability</a></li> <li><a href="/wiki/Consilience" title="Consilience">Consilience</a></li> <li><a href="/wiki/Construct_(philosophy)" title="Construct (philosophy)">Construct</a></li> <li><a href="/wiki/Correlation" title="Correlation">Correlation</a> <ul><li><a href="/wiki/Correlation_function" title="Correlation function">function</a></li></ul></li> <li><a href="/wiki/Creative_synthesis" title="Creative synthesis">Creative synthesis</a></li> <li><a href="/wiki/Demarcation_problem" title="Demarcation problem">Demarcation problem</a></li> <li><a href="/wiki/Empirical_evidence" title="Empirical evidence">Empirical evidence</a></li> <li><a href="/wiki/Experiment" title="Experiment">Experiment</a> <ul><li><a href="/wiki/Design_of_experiments" title="Design of experiments">design</a></li></ul></li> <li><a href="/wiki/Explanatory_power" title="Explanatory power">Explanatory power</a></li> <li><a href="/wiki/Fact" title="Fact">Fact</a></li> <li><a href="/wiki/Falsifiability" title="Falsifiability">Falsifiability</a></li> <li><a href="/wiki/Feminist_method" title="Feminist method">Feminist method</a></li> <li><a href="/wiki/Functional_contextualism" title="Functional contextualism">Functional contextualism</a></li> <li><a href="/wiki/Hypothesis" title="Hypothesis">Hypothesis</a> <ul><li><a href="/wiki/Alternative_hypothesis" title="Alternative hypothesis">alternative</a></li> <li><a href="/wiki/Null_hypothesis" title="Null hypothesis">null</a></li></ul></li> <li><i><a href="/wiki/Ignoramus_et_ignorabimus" title="Ignoramus et ignorabimus">Ignoramus et ignorabimus</a></i></li> <li><a href="/wiki/Inductive_reasoning" title="Inductive reasoning">Inductive reasoning</a></li> <li><a href="/wiki/Intertheoretic_reduction" title="Intertheoretic reduction">Intertheoretic reduction</a></li> <li><a href="/wiki/Inquiry" title="Inquiry">Inquiry</a></li> <li><a href="/wiki/Nature_(philosophy)" title="Nature (philosophy)">Nature</a></li> <li><a href="/wiki/Objectivity_(philosophy)" class="mw-redirect" title="Objectivity (philosophy)">Objectivity</a></li> <li><a href="/wiki/Observation" title="Observation">Observation</a></li> <li><a href="/wiki/Paradigm" title="Paradigm">Paradigm</a></li> <li><a 
href="/wiki/Problem_of_induction" title="Problem of induction">Problem of induction</a></li> <li><a href="/wiki/Scientific_evidence" title="Scientific evidence">Scientific evidence</a> <ul><li><a href="/wiki/Evidence-based_practice" title="Evidence-based practice">Evidence-based practice</a></li></ul></li> <li><a href="/wiki/Scientific_law" title="Scientific law">Scientific law</a></li> <li><a href="/wiki/Scientific_method" title="Scientific method">Scientific method</a></li> <li><a href="/wiki/Scientific_pluralism" title="Scientific pluralism">Scientific pluralism</a></li> <li><a href="/wiki/Scientific_Revolution" title="Scientific Revolution">Scientific Revolution</a></li> <li><a href="/wiki/Testability" title="Testability">Testability</a></li> <li><a href="/wiki/Theory" title="Theory">Theory</a> <ul><li><a href="/wiki/Theory_choice" title="Theory choice">choice</a></li> <li><a href="/wiki/Theory-ladenness" title="Theory-ladenness">ladenness</a></li> <li><a href="/wiki/Scientific_theory" title="Scientific theory">scientific</a></li></ul></li> <li><a href="/wiki/Underdetermination" title="Underdetermination">Underdetermination</a></li> <li><a href="/wiki/Unity_of_science" title="Unity of science">Unity of science</a></li> <li><a href="/wiki/Variable_and_attribute_(research)" title="Variable and attribute (research)">Variable</a> <ul><li><a href="/wiki/Control_variable" title="Control variable">control</a></li> <li><a href="/wiki/Dependent_and_independent_variables" title="Dependent and independent variables">dependent and independent</a></li></ul></li> <li><a href="/wiki/Index_of_philosophy_of_science_articles" title="Index of philosophy of science articles">more...</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Theories</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Coherentism" title="Coherentism">Coherentism</a></li> <li><a href="/wiki/Confirmation_holism" title="Confirmation holism">Confirmation holism</a></li> <li><a href="/wiki/Constructive_empiricism" title="Constructive empiricism">Constructive empiricism</a></li> <li><a href="/wiki/Constructive_realism" title="Constructive realism">Constructive realism</a></li> <li><a href="/wiki/Constructivist_epistemology" class="mw-redirect" title="Constructivist epistemology">Constructivist epistemology</a></li> <li><a href="/wiki/Contextualism" title="Contextualism">Contextualism</a></li> <li><a href="/wiki/Conventionalism" title="Conventionalism">Conventionalism</a></li> <li><a href="/wiki/Deductive-nomological_model" title="Deductive-nomological model">Deductive-nomological model</a></li> <li><a href="/wiki/Epistemological_anarchism" class="mw-redirect" title="Epistemological anarchism">Epistemological anarchism</a></li> <li><a href="/wiki/Evolutionism" title="Evolutionism">Evolutionism</a></li> <li><a href="/wiki/Fallibilism" title="Fallibilism">Fallibilism</a></li> <li><a href="/wiki/Foundationalism" title="Foundationalism">Foundationalism</a></li> <li><a href="/wiki/Hypothetico-deductive_model" title="Hypothetico-deductive model">Hypothetico-deductive model</a></li> <li><a href="/wiki/Inductionism" title="Inductionism">Inductionism</a></li> <li><a href="/wiki/Instrumentalism" title="Instrumentalism">Instrumentalism</a></li> <li><a href="/wiki/Model-dependent_realism" title="Model-dependent realism">Model-dependent realism</a></li> <li><a href="/wiki/Naturalism_(philosophy)" title="Naturalism 
(philosophy)">Naturalism</a></li> <li><a href="/wiki/Physicalism" title="Physicalism">Physicalism</a></li> <li><a href="/wiki/Positivism" title="Positivism">Positivism</a>&#160;/&#32;<a href="/wiki/Reductionism" title="Reductionism">Reductionism</a>&#160;/&#32;<a href="/wiki/Determinism" title="Determinism">Determinism</a></li> <li><a href="/wiki/Pragmatism" title="Pragmatism">Pragmatism</a></li> <li><a href="/wiki/Rationalism" title="Rationalism">Rationalism</a>&#160;/&#32;<a href="/wiki/Empiricism" title="Empiricism">Empiricism</a></li> <li><a href="/wiki/Received_view_of_theories" title="Received view of theories">Received view</a>&#160;/&#32;<a href="/wiki/Semantic_view_of_theories" title="Semantic view of theories">Semantic view of theories</a></li> <li><a href="/wiki/Scientific_essentialism" title="Scientific essentialism">Scientific essentialism</a></li> <li><a href="/wiki/Scientific_formalism" title="Scientific formalism">Scientific formalism</a></li> <li><a href="/wiki/Scientific_realism" title="Scientific realism">Scientific realism</a>&#160;/&#32;<a href="/wiki/Anti-realism" title="Anti-realism">Anti-realism</a></li> <li><a href="/wiki/Scientific_skepticism" title="Scientific skepticism">Scientific skepticism</a></li> <li><a href="/wiki/Scientism" title="Scientism">Scientism</a></li> <li><a href="/wiki/Structuralism_(philosophy_of_science)" title="Structuralism (philosophy of science)">Structuralism</a></li> <li><a href="/wiki/Uniformitarianism" title="Uniformitarianism">Uniformitarianism</a></li> <li><a href="/wiki/Verificationism" title="Verificationism">Verificationism</a></li> <li><a href="/wiki/Vitalism" title="Vitalism">Vitalism</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Philosophy of...</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Philosophy_of_biology" title="Philosophy of biology">Biology</a></li> <li><a href="/wiki/Philosophy_of_chemistry" title="Philosophy of chemistry">Chemistry</a></li> <li><a href="/wiki/Philosophy_of_physics" title="Philosophy of physics">Physics</a> <ul><li><a href="/wiki/Philosophy_of_space_and_time" title="Philosophy of space and time">Space and time</a></li></ul></li> <li><a href="/wiki/Philosophy_of_social_science" title="Philosophy of social science">Social science</a> <ul><li><a href="/wiki/Philosophy_of_archaeology" title="Philosophy of archaeology">Archaeology</a></li> <li><a href="/wiki/Philosophy_of_economics" class="mw-redirect" title="Philosophy of economics">Economics‎</a></li> <li><a href="/wiki/Philosophy_of_geography" title="Philosophy of geography">Geography</a></li> <li><a href="/wiki/Philosophy_of_history" title="Philosophy of history">History</a></li> <li><a href="/wiki/Philosophy_of_linguistics" title="Philosophy of linguistics">Linguistics</a></li> <li><a href="/wiki/Philosophy_of_psychology" title="Philosophy of psychology">Psychology</a></li></ul></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Related topics</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Criticism_of_science" title="Criticism of science">Criticism of science</a></li> <li><a href="/wiki/Descriptive_research" title="Descriptive research">Descriptive science</a></li> <li><a href="/wiki/Epistemology" title="Epistemology">Epistemology</a></li> <li><a href="/wiki/Exact_sciences" 
title="Exact sciences">Exact sciences</a></li> <li><a href="/wiki/Faith_and_rationality" title="Faith and rationality">Faith and rationality</a></li> <li><a href="/wiki/Hard_and_soft_science" title="Hard and soft science">Hard and soft science</a></li> <li><a href="/wiki/History_and_philosophy_of_science" title="History and philosophy of science">History and philosophy of science</a></li> <li><a href="/wiki/Non-science" title="Non-science">Non-science</a> <ul><li><a href="/wiki/Pseudoscience" title="Pseudoscience">Pseudoscience</a></li></ul></li> <li><a href="/wiki/Normative_science" title="Normative science">Normative science</a></li> <li><a href="/wiki/Protoscience" title="Protoscience">Protoscience</a></li> <li><a href="/wiki/Questionable_cause" title="Questionable cause">Questionable cause</a></li> <li><a href="/wiki/Relationship_between_religion_and_science" title="Relationship between religion and science">Relationship between religion and science</a></li> <li><a href="/wiki/Rhetoric_of_science" title="Rhetoric of science">Rhetoric of science</a></li> <li><a href="/wiki/Science_studies" title="Science studies">Science studies</a></li> <li><a href="/wiki/Sociology_of_scientific_ignorance" title="Sociology of scientific ignorance">Sociology of scientific ignorance</a></li> <li><a href="/wiki/Sociology_of_scientific_knowledge" title="Sociology of scientific knowledge">Sociology of scientific knowledge</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/List_of_philosophers_of_science" title="List of philosophers of science">Philosophers of science</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"></div><table class="nowraplinks navbox-subgroup" style="border-spacing:0"><tbody><tr><th id="Precursors10" scope="row" class="navbox-group" style="width:7.5em">Precursors</th><td class="navbox-list-with-group navbox-list navbox-odd" style="padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Roger_Bacon" title="Roger Bacon">Roger Bacon</a></li> <li><a href="/wiki/Francis_Bacon" title="Francis Bacon">Francis Bacon</a></li> <li><a href="/wiki/Galileo_Galilei" title="Galileo Galilei">Galileo Galilei</a></li> <li><a href="/wiki/Isaac_Newton" title="Isaac Newton">Isaac Newton</a></li> <li><a href="/wiki/David_Hume" title="David Hume">David Hume</a></li></ul> </div></td></tr></tbody></table><div> <ul><li><a href="/wiki/Auguste_Comte" title="Auguste Comte">Auguste Comte</a></li> <li><a href="/wiki/Henri_Poincar%C3%A9" title="Henri Poincaré">Henri Poincaré</a></li> <li><a href="/wiki/Pierre_Duhem" title="Pierre Duhem">Pierre Duhem</a></li> <li><a href="/wiki/Rudolf_Steiner" title="Rudolf Steiner">Rudolf Steiner</a></li> <li><a href="/wiki/Karl_Pearson" title="Karl Pearson">Karl Pearson</a></li> <li><a href="/wiki/Charles_Sanders_Peirce" title="Charles Sanders Peirce">Charles Sanders Peirce</a></li> <li><a href="/wiki/Wilhelm_Windelband" title="Wilhelm Windelband">Wilhelm Windelband</a></li> <li><a href="/wiki/Alfred_North_Whitehead" title="Alfred North Whitehead">Alfred North Whitehead</a></li> <li><a href="/wiki/Bertrand_Russell" title="Bertrand Russell">Bertrand Russell</a></li> <li><a href="/wiki/Otto_Neurath" title="Otto Neurath">Otto Neurath</a></li> <li><a href="/wiki/C._D._Broad" title="C. D. Broad">C. D. 
Broad</a></li> <li><a href="/wiki/Michael_Polanyi" title="Michael Polanyi">Michael Polanyi</a></li> <li><a href="/wiki/Hans_Reichenbach" title="Hans Reichenbach">Hans Reichenbach</a></li> <li><a href="/wiki/Rudolf_Carnap" title="Rudolf Carnap">Rudolf Carnap</a></li> <li><a href="/wiki/Karl_Popper" title="Karl Popper">Karl Popper</a></li> <li><a href="/wiki/Carl_Gustav_Hempel" title="Carl Gustav Hempel">Carl Gustav Hempel</a></li> <li><a href="/wiki/Willard_Van_Orman_Quine" title="Willard Van Orman Quine">W. V. O. Quine</a></li> <li><a href="/wiki/Thomas_Kuhn" title="Thomas Kuhn">Thomas Kuhn</a></li> <li><a href="/wiki/Imre_Lakatos" title="Imre Lakatos">Imre Lakatos</a></li> <li><a href="/wiki/Paul_Feyerabend" title="Paul Feyerabend">Paul Feyerabend</a></li> <li><a href="/wiki/Ian_Hacking" title="Ian Hacking">Ian Hacking</a></li> <li><a href="/wiki/Bas_van_Fraassen" title="Bas van Fraassen">Bas van Fraassen</a></li> <li><a href="/wiki/Larry_Laudan" title="Larry Laudan">Larry Laudan</a></li></ul></div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <li><a href="/wiki/Category:Philosophy_of_science" title="Category:Philosophy of science">Category</a></li> <li><span class="nowrap"><span class="noviewer" typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/c/cd/Socrates.png/18px-Socrates.png" decoding="async" width="18" height="28" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/c/cd/Socrates.png/27px-Socrates.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/c/cd/Socrates.png/36px-Socrates.png 2x" data-file-width="326" data-file-height="500" /></span></span> </span><a href="/wiki/Portal:Philosophy" title="Portal:Philosophy">Philosophy&#32;portal</a></li> <li><span class="nowrap"><span class="noviewer" typeof="mw:File"><a href="/wiki/File:Nuvola_apps_kalzium.svg" class="mw-file-description"><img alt="icon" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/8b/Nuvola_apps_kalzium.svg/28px-Nuvola_apps_kalzium.svg.png" decoding="async" width="28" height="28" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/8b/Nuvola_apps_kalzium.svg/42px-Nuvola_apps_kalzium.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/8b/Nuvola_apps_kalzium.svg/56px-Nuvola_apps_kalzium.svg.png 2x" data-file-width="128" data-file-height="128" /></a></span> </span><a href="/wiki/Portal:Science" title="Portal:Science">Science&#32;portal</a></li> </div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐web.codfw.main‐b766959bd‐l4rcj Cached time: 20250214041836 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 1.750 seconds Real time usage: 1.884 seconds Preprocessor visited node count: 12392/1000000 Post‐expand include size: 545842/2097152 bytes Template argument size: 7120/2097152 bytes Highest expansion depth: 15/100 Expensive parser function count: 19/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 741002/5000000 bytes Lua time usage: 1.178/10.000 seconds Lua memory usage: 27879394/52428800 bytes Lua Profile: ? 
Retrieved from "https://en.wikipedia.org/w/index.php?title=Ethics_of_artificial_intelligence&oldid=1274944047"

Categories: Artificial intelligence · Philosophy of artificial intelligence · Ethics of science and technology · Regulation of robots
lacking reliable references">All articles lacking reliable references</a></li><li><a href="/wiki/Category:Articles_lacking_reliable_references_from_January_2024" title="Category:Articles lacking reliable references from January 2024">Articles lacking reliable references from January 2024</a></li><li><a href="/wiki/Category:All_accuracy_disputes" title="Category:All accuracy disputes">All accuracy disputes</a></li><li><a href="/wiki/Category:Articles_with_disputed_statements_from_April_2024" title="Category:Articles with disputed statements from April 2024">Articles with disputed statements from April 2024</a></li><li><a href="/wiki/Category:All_articles_with_unsourced_statements" title="Category:All articles with unsourced statements">All articles with unsourced statements</a></li><li><a href="/wiki/Category:Articles_with_unsourced_statements_from_June_2024" title="Category:Articles with unsourced statements from June 2024">Articles with unsourced statements from June 2024</a></li><li><a href="/wiki/Category:All_articles_with_failed_verification" title="Category:All articles with failed verification">All articles with failed verification</a></li><li><a href="/wiki/Category:Articles_with_failed_verification_from_November_2020" title="Category:Articles with failed verification from November 2020">Articles with failed verification from November 2020</a></li></ul></div></div> </div> </main> </div> <div class="mw-footer-container"> <footer id="footer" class="mw-footer" > <ul id="footer-info"> <li id="footer-info-lastmod"> This page was last edited on 10 February 2025, at 04:23<span class="anonymous-show">&#160;(UTC)</span>.</li> <li id="footer-info-copyright">Text is available under the <a href="/wiki/Wikipedia:Text_of_the_Creative_Commons_Attribution-ShareAlike_4.0_International_License" title="Wikipedia:Text of the Creative Commons Attribution-ShareAlike 4.0 International License">Creative Commons Attribution-ShareAlike 4.0 License</a>; additional terms may apply. By using this site, you agree to the <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Terms_of_Use" class="extiw" title="foundation:Special:MyLanguage/Policy:Terms of Use">Terms of Use</a> and <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy" class="extiw" title="foundation:Special:MyLanguage/Policy:Privacy policy">Privacy Policy</a>. 
[\"CITEREFZach_Musgrave_and_Bryan_W._Roberts2015\"] = 1,\n [\"CITEREFZhouTan2023\"] = 1,\n [\"CITEREFŠekrstMcHughCefalu2024\"] = 1,\n [\"DeloitteGDPR\"] = 1,\n [\"WiredMS\"] = 1,\n [\"bs\"] = 1,\n [\"lacuna\"] = 1,\n [\"p7001\"] = 1,\n [\"principles\"] = 1,\n}\ntemplate_list = table#1 {\n [\"!\"] = 3,\n [\"Artificial intelligence\"] = 1,\n [\"Artificial intelligence navbox\"] = 1,\n [\"Better source needed\"] = 1,\n [\"Citation\"] = 6,\n [\"Cite arXiv\"] = 10,\n [\"Cite book\"] = 18,\n [\"Cite journal\"] = 56,\n [\"Cite magazine\"] = 2,\n [\"Cite news\"] = 20,\n [\"Cite web\"] = 68,\n [\"Cn\"] = 1,\n [\"Columns-list\"] = 1,\n [\"Cs1 config\"] = 1,\n [\"Dubious\"] = 1,\n [\"Ethics\"] = 1,\n [\"Existential risk from artificial intelligence\"] = 1,\n [\"Failed verification\"] = 1,\n [\"Further\"] = 1,\n [\"Harvnb\"] = 1,\n [\"In lang\"] = 1,\n [\"Main\"] = 7,\n [\"Main articles\"] = 1,\n [\"McCorduck 2004\"] = 1,\n [\"Philosophy of science\"] = 1,\n [\"ProQuest\"] = 1,\n [\"Reflist\"] = 1,\n [\"Rp\"] = 2,\n [\"Short description\"] = 1,\n [\"Webarchive\"] = 14,\n}\narticle_whitelist = table#1 {\n}\nciteref_patterns = table#1 {\n}\ntable#1 {\n}\ntable#1 {\n}\n","limitreport-profile":[["?","240","22.2"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::callParserFunction","140","13.0"],["type","80","7.4"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::gsub","80","7.4"],["dataWrapper \u003Cmw.lua:672\u003E","80","7.4"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::find","80","7.4"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::preprocess","60","5.6"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::len","60","5.6"],["\u003Cmw.title.lua:50\u003E","40","3.7"],["MediaWiki\\Extension\\Scribunto\\Engines\\LuaSandbox\\LuaSandboxCallback::getAllExpandedArguments","40","3.7"],["[others]","180","16.7"]]},"cachereport":{"origin":"mw-web.codfw.main-b766959bd-l4rcj","timestamp":"20250214041836","ttl":2592000,"transientcontent":false}}});});</script> <script type="application/ld+json">{"@context":"https:\/\/schema.org","@type":"Article","name":"Ethics of artificial intelligence","url":"https:\/\/en.wikipedia.org\/wiki\/Ethics_of_artificial_intelligence","sameAs":"http:\/\/www.wikidata.org\/entity\/Q12727779","mainEntity":"http:\/\/www.wikidata.org\/entity\/Q12727779","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\/\/www.wikimedia.org\/static\/images\/wmf-hor-googpub.png"}},"datePublished":"2007-10-10T08:24:29Z","dateModified":"2025-02-10T04:23:48Z","headline":"ethics of technology specific to robots and other artificially intelligent beings"}</script> </body> </html>
