Consciousness Studies/The Philosophical Problem/Machine Consciousness
<div class="mw-heading mw-heading2"><h2 id="Elementary_Information_and_Information_Systems_Theory">Elementary Information and Information Systems Theory</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=1" title="Edit section: Elementary Information and Information Systems Theory" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=1" title="Edit section's source code: Elementary Information and Information Systems Theory"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>When one physical thing interacts with another a change in "state" occurs. For instance, when a beam of white light, composed of a full spectrum of colours is reflected from a blue surface all colours except blue are absorbed and the light changes from white to blue. When this blue light interacts with an eye it causes blue sensitive cones to undergo a chemical change of state which causes the membrane of the cone to undergo an electrical change of state etc. The number of distinguishable states that a system can possess is the amount of information that can be encoded by the system. </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudinfo1.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/8/8e/Constudinfo1.gif" decoding="async" width="613" height="413" class="mw-file-element" data-file-width="613" data-file-height="413" /></a></span> </p><p>Each distinguishable state is a "bit" of information. The binary symbols "1" and "0" have two states and can be used to encode two bits of information. </p><p>The binary system is useful because it is probably the simplest encoding of information and any object can represent a binary "1". In electrical digital systems an electrical pulse represents a "1" and the absence of a pulse represents a "0". Information can be transferred from place to place with these pulses. Things that transfer information from one place to another are known as "signals". </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudinfo2.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/4/46/Constudinfo2.gif" decoding="async" width="663" height="473" class="mw-file-element" data-file-width="663" data-file-height="473" /></a></span> </p><p>Information is encoded by changes of state, these changes can occur over time or as variations in density, temperature, colour etc. in the three directions in space. The writing on this page is spatially encoded. </p><p>It is interesting that our spoken communication uses a narrow band of sound waves. This favours the temporal encoding of information, in other words speech is largely a one dimensional stream of symbols. In vision, somesthesis, sound location and some of the other senses the brain uses spatial encoding of information as well as encoding over time. 
</p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudinfo3.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/1/1c/Constudinfo3.gif" decoding="async" width="507" height="457" class="mw-file-element" data-file-width="507" data-file-height="457" /></a></span> </p><p>The rearrangement or replacement of a set of information so that some or all of the original information becomes encoded as another set of states is known as "processing". Devices that perform these actions are known as "information processors". The brain is predominantly an information processor. </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudinfo4.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/a/a4/Constudinfo4.gif" decoding="async" width="698" height="379" class="mw-file-element" data-file-width="698" data-file-height="379" /></a></span> </p><p>Information systems in general have transducers that convert the state of signals in the world into signals impressed on another carrier, they then subject these signals to various processes and store them. </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudcla1.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/9/9e/Constudcla1.gif" decoding="async" width="544" height="604" class="mw-file-element" data-file-width="544" data-file-height="604" /></a></span> </p><p>The spatial encoding in the brain generally preserves the relation of what is adjacent to what in the sensory field. This allows the form (geometry) of stimuli to be encoded. </p><p>Information transfers in the brain occur along numerous parallel "channels" and processes occur within each channel and between channels. Phenomenal consciousness at any moment contains a continuum of simultaneous (parallel) events. Classical processes take time so phenomenal experience is likely to be, at any instant, a simultaneous output of processes, not a classical process itself. </p> <div class="mw-heading mw-heading2"><h2 id="Classification,_signs,_sense,_relations,_supervenience_etc."><span id="Classification.2C_signs.2C_sense.2C_relations.2C_supervenience_etc."></span>Classification, signs, sense, relations, supervenience etc.</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=2" title="Edit section: Classification, signs, sense, relations, supervenience etc." class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=2" title="Edit section's source code: Classification, signs, sense, relations, supervenience etc."><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>A <b>sign</b> is a symbol, combination of symbols such as a <b>word</b> or a combination of words. A <b>referent</b> is "...that to which the sign refers, which may be called the reference of the sign" (Frege 1892). Statements and concepts usually express relations between referents. 
The "sense" of statements depends on more than the simple referents within them. For instance, "the morning star is the evening star" is true in terms of the referents but dubious in terms of the sense of the morning and evening stars, because the morning star is Venus as seen in the morning and the evening star is Venus as seen in the evening. So the sense of the expression "the morning star" depends on both the referent "Venus" and the referent "morning", and probably on other associations such as "sunrise", "mist" etc.

Each sign is related to many other signs, and it is these groups of relationships that provide the sense of a sign or a set of signs. A "relation" is an association between things. It can be understood in the abstract as "what is next to what". Relations occur in both time and space. When a ball bounces, the impact with the floor changes the direction of the ball, so "direction" is related to "impact"; the ball is round, so "ball" is related to "round". Similarly, the morning is next to the presence of the morning star, so "morning" and "morning star" are related. Relations are the connections that allow classification.

According to the physical concept of information, all abstract signs are physical states of a signal and are only abstract according to whether they are related to a physical thing or exclusively to another sign. The process of treating an abstract idea as if it were a concrete thing that contains other concrete things is known as "reification".

It is possible to have statements that have a sense but apparently no reference. As Frege put it, the words 'the celestial body most distant from the Earth' have a sense but may not have a reference. There can be classes of things that have not yet acquired any members or have no members. In a physical sense a particular "class" is a sign that refers to a particular state or set of states. Classes can be arbitrary, such as "big things" being all things that have a state of being over one metre long. Classes and "sets" are very similar; sets are sometimes defined as a class that is an element of another class. The term "set" has largely superseded the term "class" in academic publications since the mid twentieth century.

The "intension" of a set is its description or defining properties. The "extension" of a set is its members or contents. In mathematics a set is simply its members, or extension. In philosophy there is considerable discussion of the way that a given description can describe more than one thing; in other words, one intension can have several extensions. The set of things that are "tables" has the properties "legs", "flat surface" etc. The extension of "tables" is all the physical tables. The intension of "tables" may also include "stools" unless there is further clarification of the properties of "tables". Intensions are functions that identify the extensions (the members of a set) from the properties.

Classification is performed by information systems and by the information processing parts of the nervous system. A simple classification is to sort symbols according to a set of rules; for instance, a simple sort classifies words by letter sequence. There are numerous classification systems in the visual system, such as arrangements of neurons that produce a single output when a particular orientation of a line is viewed or a particular face is seen.
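A minimal sketch, not from the original text, of intension and extension as they might appear in an information system: the intension is a predicate over an object's properties, and the extension is whatever that predicate picks out of a collection. The property names and objects below are illustrative assumptions.

```python
# Illustrative sketch: an intension maps properties to membership; the extension
# is the set of things that the intension picks out of a collection.
objects = [
    {"name": "kitchen table", "legs": 4, "flat_surface": True,  "sat_on": False},
    {"name": "stool",         "legs": 3, "flat_surface": True,  "sat_on": True},
    {"name": "ball",          "legs": 0, "flat_surface": False, "sat_on": False},
]

def table_intension(thing: dict) -> bool:
    """A coarse intension: anything with legs and a flat surface."""
    return thing["legs"] > 0 and thing["flat_surface"]

def extension(intension, things):
    return [t["name"] for t in things if intension(t)]

print(extension(table_intension, objects))
# ['kitchen table', 'stool']  -- the description is too coarse
# Adding a further defining property narrows the extension:
print(extension(lambda t: table_intension(t) and not t["sat_on"], objects))
# ['kitchen table']
```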
The processes that identify attributes and properties of a thing are usually called <b>filters</b>. </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudcla2.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/c/cc/Constudcla2.gif" decoding="async" width="479" height="408" class="mw-file-element" data-file-width="479" data-file-height="408" /></a></span> </p><p>The output of filters becomes the properties of a set and specifies the relations between sets. These relations are stored as address pointers in computers or connections in the nervous system. </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudcla3.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/1/10/Constudcla3.gif" decoding="async" width="461" height="428" class="mw-file-element" data-file-width="461" data-file-height="428" /></a></span> </p><p>An intension uses these properties and relations to identify the things that are members of the set in the world. Clearly the more specific the filters the more accurate the intension. </p><p>A <b>database</b> is a collection of signs. A <b>fully relational</b> database is a database arranged in related sets with all relationships represented by pointers or connections. In conventional usage a <b>relational database</b> is similar but more sophisticated, redundant relationships and wasteful storage being avoided. Conventional relational databases obey "Codd's laws". An <b>hierarchical database</b> only contains pointers that point from the top of a classification hierarchy downwards. Events and persistent objects are also known as <b>entities</b>, the output of filters related to entities are known as the <b>attributes</b> of the entity. In practice a system requires an event filter to record an entity (in a computer system the event filter is usually a single data entry form and the attributes are filtered using boxes on the screen to receive typed input). </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudcla4.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/c/ce/Constudcla4.gif" decoding="async" width="701" height="457" class="mw-file-element" data-file-width="701" data-file-height="457" /></a></span> </p><p>In information systems design there are many ways of representing <b>classification hierarchies</b>, the most common is the <b>entity diagram</b> which assumes that the attributes of an entity define it and are stored together physically with the symbols that represent the entity. This adjacent storage is purely for convenient management of storage space and reduction of the time required for retrieval in modern computers. </p><p>Filters contain processing agents of varying degrees of sophistication from simple sorting processes to "intelligent" processes such as programs and neural networks. It is also possible to arrange filters in the world beyond an information processor. For instance, an automatic text reading machine might turn over the pages of a book to acquire a particular page. A human being might stroke an object to confirm that the texture is as it appears to be and so on. </p><p>Scientists routinely use external transducers and filters for the purpose of classification. For instance, a mass spectrometer could be used to supply details of the atomic composition of an item. 
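</p><p>As a minimal sketch of the storage scheme described above, the fragment below (the entities, attributes and relation names are hypothetical) shows attributes as the stored outputs of filters and relations held as pointers from one record to another.</p>
<pre>
# Minimal sketch of a "fully relational" store: entities hold attributes
# (the outputs of filters) and relations are held as pointers (references)
# between records.  All names and values are hypothetical.

class Entity:
    def __init__(self, name, **attributes):
        self.name = name
        self.attributes = attributes   # filter outputs, e.g. colour, shape
        self.relations = {}            # relation name -> pointer to another Entity

    def relate(self, relation, other):
        self.relations[relation] = other

ball  = Entity("ball", shape="round", colour="red")
floor = Entity("floor", material="wood")

# "impacts" relates the ball to the floor; the relation is just a pointer.
ball.relate("impacts", floor)

print(ball.attributes["shape"])        # -> round
print(ball.relations["impacts"].name)  # -> floor
</pre>
<p>An hierarchical database would retain only the downward pointers of such a structure, whereas a fully relational database retains them all. </p><p>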
External filters allow us to distinguish between things that are otherwise identical (such as two watery compounds XYZ and H2O) or to acquire properties that are unobservable with biological transducers such as the eyes and ears. The scientist plus his instruments is a single information system. In practice the referent of a set is determined by applying transducers and filters to the world and looking up the results in a relational database. If the result is the original set then a referent has been found. A sophisticated system may apply "fuzzy logic" or other methods to assign a probability that an object is truly a member of a particular set. </p><p>It is also possible to classify information according to relationships in time (i.e.: starting a car's engine is related to car moving away). Within an information system the output from the filter for "starting engine" might precede that from the filter for "starts moving". In information systems design procedures that involve successions of events can be arranged in classification structures in the same way as data; this technique is known as <b>structured programming</b> (esp. Jackson structured programming). </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudstruct.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/1/10/Constudstruct.gif" decoding="async" width="467" height="596" class="mw-file-element" data-file-width="467" data-file-height="596" /></a></span> </p><p>Hierarchies related to a single entity are frequently stored together as <b>objects</b> and the information processing that results is known as <b>object oriented programming</b>. A fully relational database would, in principle, contain all the objects used in a structured information system. In Part III the storage and sequential retrieval of related functions in the brain is described. </p><p>It has been pointed out by (McCarthy and Hayes (1969)) that an information processor that interacts with the environment will be producing continuous changes in all of its classifications (such as position etc.) and also changes in theories (structured programs that are predictive processes) about the world. In a serial processor, such as a Turing Machine with a one dimensional tape, the presence of changes in the world would create a huge burden on the machine. In a parallel processor, such as a biological neural network, the reclassifications should be straightforward. The problem of adapting an information system to changes in the world, most of which have little effect on the processes performed by the system, is known as the <b>frame problem</b>. The frame problem is usually stated in a form such as "how is it possible to write formulae that describe the effects of actions without having to write a large number of accompanying formulae that describe the mundane, obvious non-effects of those actions?" (Shanahan 2004). </p><p>Chalmers(1996) introduced the terms <b>primary intension</b> and <b>secondary intension</b>. Primary intension is a high level description where the properties of a set may be insufficient to specify the contents of the set in the physical world. For instance, the term "watery" might specify several liquids with various compositions. Secondary intension is specific so that it applies to one substance in the world (H2O). In the context of information systems primary intensions differ from secondary intensions as a result of inadequate filtering and classification. 
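</p><p>The same point can be put in terms of a toy lookup (a rough sketch; the samples and filters below are hypothetical): a primary intension built only from coarse "watery" filters cannot separate two substances that a finer, compositional filter distinguishes.</p>
<pre>
# Sketch: a primary intension built only from coarse, surface filters cannot
# separate two "watery" substances, while adding a compositional filter
# (a secondary-intension-style description) can.  Values are hypothetical.

samples = {
    "sample_1": {"clear": True, "potable": True, "composition": "H2O"},
    "sample_2": {"clear": True, "potable": True, "composition": "XYZ"},
}

coarse_filters = [lambda s: s["clear"], lambda s: s["potable"]]          # "watery"
fine_filters   = coarse_filters + [lambda s: s["composition"] == "H2O"]  # "water"

def members(filters):
    return [name for name, s in samples.items() if all(f(s) for f in filters)]

print(members(coarse_filters))  # -> ['sample_1', 'sample_2']  (both satisfy "watery")
print(members(fine_filters))    # -> ['sample_1']              (only H2O remains)
</pre>
<p>Until the finer filter is available the system can register no difference in meaning between the two samples. </p><p>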
(See note below for details of Putnam's twin earth thought experiment). </p><p>The problem of matching the properties and relations of an item in a relational database with an item in the world involves the problem of <b>supervenience</b>. Supervenience occurs when the properties and relations in the database for an item are the same as the output from filters applied to the item. In other words, <b>in an information system information does not supervene directly on a thing; it supervenes on information derived from the thing</b>. Chalmers described supervenience in terms that are accessible to an information systems approach: </p><p><span style="font-family:'times new roman',serif;">"The properties of A supervene on the properties of B if no two possible situations are identical with respect to the properties of B while differing with respect to the properties of A (after Chalmers 1996)."</span> </p><p>In terms of information processing the <i>properties</i> are changes in state derived from a transducer that are subject to classification with a filter. The properties of a predictive program would supervene on the input from transducers applied to an object if it correctly identified the sets and sequence of sets that are discovered at all times. </p><p>Information theory is consistent with <b>physicalism</b>. Philosophers coined the term physicalism to describe the argument that there are only physical things. In <b>token physicalism</b> every event is held to be a physical event and in <b>type physicalism</b> every property of a mental event is held to have a corresponding property of a physical event. Token physicalism is consistent with information theory because every bit of information is a part of an arrangement of a physical substrate and hence a physical event. Type physicalism would be consistent with information theory if it is held that mental events are also arrangements of substrates. It is sometimes held that the existence of abstract mental entities means that token physicalism does not correspond to type physicalism. In terms of information theory abstract entities would be derived sets of information that are arrangements of substrates. Hence information theory does not distinguish between type and token physicalism. </p><p>The reader should be cautioned that there is an extensive literature associated with supervenience that does not stress the way that information is embodied and representational. (The removal of these constraints will lead to non-physical theories of information). </p><p>It is sometimes asked how conscious experience containing a quale that is a colour, such as blueness, can supervene on the physical world. In terms of information systems the question is back to front: blueness is very probably a phenomenon in the physical brain - it is certainly unlike an arrangement of stored bits in an information system. The question should read "what physical theory supervenes on information in the signals related to the phenomenon called blue?" </p><p>The simple answer is that there is no widely accepted description available of the physical nature of the experience called blue (there are several theories however). A common mistake is to say that the secondary intension of the quale blue is known - this is not the case; the physical basis of electromagnetic radiation or absorption of light is known to some extent but these are almost certainly not the physical basis of the "blue" of experience. 
The quale "blue" is probably a particular substrate that has a state, not an encoded state on a generalised substrate. </p><p>Information is the patterns and states of an underlying substrate or carrier, this leaves us with exciting questions such as: what is it like to be the substrate itself rather than simply the information impressed upon it? Can only particular substrates constitute conscious experience? How can we relate the properties of this experience to information about the physical world? </p><p>The substrate of information is not part of the problem of access consciousness which deals with the problem of the flow of information from place to place. </p><p>Frege, G. (1892) On Sense and Reference. <a class="external free" href="https://en.wikisource.org/wiki/On_Sense_and_Reference">http://en.wikisource.org/wiki/On_Sense_and_Reference</a> </p><p>Pruss, A.R. (2001) The Actual and the Possible. in Richard M. Gale (ed.), Blackwell Guide to Metaphysics, Oxford: Blackwell. <a rel="nofollow" class="external free" href="http://www.georgetown.edu/faculty/ap85/papers/ActualAndPossible.html">http://www.georgetown.edu/faculty/ap85/papers/ActualAndPossible.html</a> </p><p>Menzies, P (2001). Counterfactual Theories of Causation. Stanford Encyclopedia of Philosophy <a rel="nofollow" class="external free" href="http://plato.stanford.edu/entries/causation-counterfactual/">http://plato.stanford.edu/entries/causation-counterfactual/</a> </p><p>McCarthy, J. and Hayes, P.J. (1969), "Some Philosophical Problems from the Standpoint of Artificial Intelligence", Machine Intelligence 4, ed. D.Michie and B.Meltzer, Edinburgh: Edinburgh University Press, pp. 463–502. </p><p>Shanahan, M. (2004) "The frame problem". Stanford Encyclopedia of Philosophy. <a rel="nofollow" class="external free" href="http://plato.stanford.edu/entries/frame-problem/">http://plato.stanford.edu/entries/frame-problem/</a> </p> <div class="mw-heading mw-heading2"><h2 id="The_construction_of_filters:_Bayesian_and_Neural_Network_models">The construction of filters: Bayesian and Neural Network models</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=3" title="Edit section: The construction of filters: Bayesian and Neural Network models" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=3" title="Edit section's source code: The construction of filters: Bayesian and Neural Network models"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><i>This is a <b>stub</b> and needs expanding</i> </p> <div class="mw-heading mw-heading2"><h2 id="Qualia_and_Information">Qualia and Information</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=4" title="Edit section: Qualia and Information" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=4" title="Edit section's source code: Qualia and Information"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> 
<p>The problem of the generalised nature of information is addressed by several "thought experiments" which are described below. </p><p>The problem of "intensions" is tackled in Putnam's twin earth thought experiment which was discussed above but is given in more detail below. </p> <div class="mw-heading mw-heading3"><h3 id="Absent_and_fading_qualia">Absent and fading qualia</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=5" title="Edit section: Absent and fading qualia" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=5" title="Edit section's source code: Absent and fading qualia"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading4"><h4 id="Absent_qualia">Absent qualia</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=6" title="Edit section: Absent qualia" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=6" title="Edit section's source code: Absent qualia"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Block (1978) argued that the same functions can be performed by a wide range of systems. For instance, if the population of China were equipped with communication devices and a set of rules they could perform almost any function but would they have qualia? The argument considers the fact that systems which process information can be constructed of a wide range of materials and asks whether such systems will also have qualia (see illustration below). </p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudfun.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/f/f9/Constudfun.png" decoding="async" width="474" height="563" class="mw-file-element" data-file-width="474" data-file-height="563" /></a></span> </p><p>This argument also occurs when the physical structure of computing devices is considered, for instance a computing machine could be constructed from rolling steel balls. Would the steel balls at one instant possess the quale 'blue' and then, as a result of the movement of one ball to another position, possess the quale 'red'? Can an arrangement of balls really have qualia or are they absent? It is incumbent upon proponents of functional organisation to describe why identical balls arranged as O O OOO can be the quale red and yet those arranged as OOO O O can be the quale blue. They must also take into account Kant's "handedness problem": the balls OOO O O look like O O OOO when viewed from behind. Red and blue, as arrangements of things, would be identical depending on the viewing point. How can a processor have a viewing point when it is itself the steel balls? 
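</p><p>The handedness point can be made concrete with a trivial sketch (the two arrangements below are hypothetical stand-ins for the states said to encode the two qualia):</p>
<pre>
# Two arrangements of identical balls (1 = ball, 0 = gap), hypothetically
# taken to encode two different qualia.
red_arrangement  = [1, 0, 1, 0, 1, 1, 1]   # O O OOO
blue_arrangement = [1, 1, 1, 0, 1, 0, 1]   # OOO O O

# "Viewed from behind" the spatial order is simply reversed:
assert list(reversed(red_arrangement)) == blue_arrangement

# So which quale the arrangement is supposed to be depends on a viewpoint
# that is external to the arrangement itself.
</pre>
<p>Nothing in the arrangement itself determines which way round it should be read.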
</p><p><span class="mw-default-size" typeof="mw:File"><a href="/wiki/File:Constudhanded.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/f/fc/Constudhanded.gif" decoding="async" width="466" height="245" class="mw-file-element" data-file-width="466" data-file-height="245" /></a></span> </p> <div class="mw-heading mw-heading4"><h4 id="Fading_qualia">Fading qualia</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=7" title="Edit section: Fading qualia" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=7" title="Edit section's source code: Fading qualia"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Pylyshyn (1980) introduced a thought experiment in which a human brain is progressively replaced by synthetic components and it is asked what would happen to consciousness during this replacement of the brain. </p><p>Chalmers (1996) considers the problem in depth from the point of view of functional organisation. (i.e.: considering replacement of biological components with components that perform the same functions). The argument is straightforward: if phenomenal consciousness is due to functional organisation then replacement of biological parts with artificial parts that duplicate the function should allow phenomenal consciousness to continue. </p><p>But suppose phenomenal consciousness is not due to functional organisation. What would we expect then? </p><p>Chalmers argues that consciousness could not suddenly disappear during replacement of the brain because functions could be replaced in tiny stages so unless qualia could reside in a single tiny place in the brain <b>Disappearing qualia</b> would be ruled out. </p><p>Chalmers considers the alternative idea of <b>fading qualia</b>, where slow replacement of parts reduces experience progressively. This "fading" is described in terms of qualia fading from red to pink and experience in general becoming more and more out of step with the world. Chalmers dismisses the idea of fading qualia on the grounds that people do not have abnormal experiences, like fading colours, except in the case of pathology. More specifically, he argues that since it seems intuitively obvious that silicon implants could be devised to stand in, in any relevant functional role, for the original brain matter, we might reasonably assume that during the carbon - silicon transformation the organism's functional state, including all its dispositions to notice and report what experiences it is having, can be preserved. The absurd consequence is then supposed to consist in a being whose qualia have significantly faded continuing to report them as they originally were; without noticing the change. </p><p>Crabb(2005) has argued that there are hidden premises in this argument, and once these are exposed the desired conclusion is seen to be unwarranted. Thus, consider the assumption that during the silicon implantation process the person's functional state can be preserved in any relevant respect. This is very likely the case. Certainly, we have no <i>a priori</i> reason for ruling out the possibility; for surely technology might be employed to achieve any functional state we desire. 
In principle, then, it just has to be possible to preserve such functional traits as the noticing and reporting of the original qualia. But then, as Crabb observes, the alleged absurdity of issuing such reports in the presence of <i>severely faded</i> qualia depends on a further assumption: that during the implantation process the noticing and reporting functions have been preserved in such a way that we should still expect the noticing and reporting to remain fairly accurate. Chalmers completely overlooks this requirement. In effect, then, he is arguing in a circle. He is arguing that faded qualia in the presence of the original functional states are very unlikely, because a conscious being will tend to track its own conscious states fairly accurately. Why? Because the preservation of the original functional states during the implantation process is of the sort required to preserve the faithfulness of the subject's tracking. How do we know this? Well, because it is just generally true to say that a conscious being would be able, in respect of noticing and reporting, to track its conscious states. In short, then, he is saying that qualia could not fade with functional states intact, because in general that just could not happen. </p><p>Consider the following example. The original human subject Joe starts out seeing red things and experiencing vivid red qualia. He reports them as such. Then an evil scientist implants a device between Joe's visual cortex and his speech centre which effectively overrides the output from the <i>red zone</i> of the visual cortex, and ensures that, come what may experientially, Joe will report that his qualia are vivid. We could assume a similar intervention has also been effected at the <i>noticing centre</i>, whatever that might be. Plausibly, then, Joe will continue to notice and report vivid qualia even though his own are severely faded. Now Crabb's question is this: why would Chalmers assume that the item-for-item silicon substitutions he envisaged would not themselves allow this sort of noticing and reporting infidelity? And unless he can provide a good reason, his thought experiment with Joe and his fading qualia simply does not work. Of course the functional states can be preserved during the silicon substitutions, but we have no reason to suppose that noticing and reporting fidelity can too. Consequently, there is no inference to an absurd situation, and therefore no reason to reject the possibility of fading qualia. </p><p>It is possible that at some stage during the replacement process the synthetic parts alone would have sufficient data to identify objects and properties of objects so that the experience would be like blindsight. The subject might be amazed that subjective vision was disappearing. However, Chalmers denies that new beliefs, such as amazement at a new state, would be possible. He says that: </p><p><span style="font-family:'times new roman',serif;">"Nothing in the physical system can correspond to that amazement. There is no room for new beliefs such as "I can't see anything," new desires such as the desire to cry out, and other new cognitive states such as amazement. Nothing in the physical system can correspond to that amazement."</span> </p><p>On the basis of the impossibility of new beliefs Chalmers concludes that fading qualia are impossible. Again, though, he has failed to explain why he thinks the original belief set can be preserved come what may, and in such a way as to preserve belief and reporting fidelity. 
</p><p>Notwithstanding these objections, then, according to Chalmers, if fading qualia do not occur then qualia must also exist in "Robot", a totally synthetic entity, so <b>absent qualia</b> do not occur either. Therefore, Robot should be conscious. He concludes the fading qualia argument by stating that it supports his theory that consciousness results from <i>organizational invariance</i>, a specific set of functions organised in a particular way: </p><p><span style="font-family:'times new roman',serif;">"The invariance principle taken alone is compatible with the solipsistic thesis that my organization gives rise to experience. But one can imagine a gradual change to my organization, just as we imagined a gradual change to my physical makeup, under which my beliefs about my experience would be mostly preserved throughout, I would remain a rational system, and so on. For similar reasons to the above, it seems very likely that conscious experience would be preserved in such a transition"</span> </p><p>The response to this should now be obvious. What exactly does remaining 'a rational system' entail? If it entails the preservation of noticing and reporting fidelity, then it follows that Joe's qualia would not fade. But there is no independent support for this entailment. It remains perfectly reasonable to assume that Joe's qualia would fade, and therefore that the only way he could end up misreporting his fading qualia as bright would be through a breakdown in fidelity, of the sort Crabb describes. </p><p>Chalmers notes that if qualia were epiphenomenal and not due to functional organisation then the argument would be false. This is rather unfortunate because it makes the argument tautological: if it is assumed that conscious experience is due to functional organisation then the argument shows that conscious experience is due to functional organisation. The role of epiphenomenal, or apparently epiphenomenal, consciousness brings the philosopher back to the problem of change, where consciousness does not appear to be necessary for change (functions) but change does not seem to be possible without consciousness. </p><p>There are other interesting questions related to the fading qualia argument, for instance: Can all of organic chemistry be replaced by inorganic chemistry - if not why not? If information always has a physical substrate and conscious experience is the arrangement of that substrate then how could conscious experience be the same if the substrate is replaced? At the level of molecular and atomic interactions almost all functions involve electromagnetic fields, if identical function is achieved at scales below the size of an organelle in a cell in the brain would the functional elements, such as electromagnetic fields, have been changed? (i.e.: is the replacement feasible or would it be necessary to use organic parts to replace organic parts at small scales?). </p><p>The reader may have spotted that Chalmers' fading qualia argument is very similar to Dennett's argument about the non-existence of qualia. In Dennett's argument qualia are dubiously identified with judgements and then said to be non-existent. In Chalmer's argument an attempt is made to identify qualia with beliefs about qualia so they can be encompassed by a functionalist theory. </p><p>The reader may also have noticed that the argument, by using microscopic progressive replacement, preserves the form of the brain. 
The replacement is <i>isomorphic</i> but it is not explained anywhere why form should need to be preserved as well as function. To examine functionalism the argument should allow each replacement module to be of any size and placed anywhere in the world. Furthermore, it should be possible for the functions to be asynchronous. But the argument is not a simple examination of functionalism. If form is important why is it important? Would a silicon replacement necessarily be able to achieve the same four-dimensional form as the organic original? </p><p>Pylyshyn, Z. (1980) The "causal power" of machines. Behavioral and Brain Sciences 3:442-444. </p><p>Chalmers, D.J. (1996). The Conscious Mind. Oxford University Press. </p><p>Chalmers, D.J. Facing Up to the Problem of Consciousness (summary of above at <a rel="nofollow" class="external free" href="http://cogprints.org/316/00/consciousness.html">http://cogprints.org/316/00/consciousness.html</a>). </p><p>Crabb, B.G. (2005) "Fading and Dancing Qualia - Moving and Shaking Arguments", Deunant Books. </p> <div class="mw-heading mw-heading3"><h3 id="Putnam's_twin_earth_thought_experiment"><span id="Putnam.27s_twin_earth_thought_experiment"></span>Putnam's twin earth thought experiment</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=8" title="Edit section: Putnam's twin earth thought experiment" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=8" title="Edit section's source code: Putnam's twin earth thought experiment"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The original Twin Earth thought experiment was presented by philosopher Hilary Putnam in his important 1975 paper "The Meaning of 'Meaning'", as an early argument for what has subsequently come to be known as semantic externalism. Since that time, philosophers have proposed a number of variations on this particular thought experiment, which can be collectively referred to as Twin Earth thought experiments. </p><p>Putnam's original formulation of the experiment was this: </p> <dl><dd>We begin by supposing that elsewhere in the universe there is a planet exactly like earth in virtually all respects, which we refer to as ‘Twin Earth’. (We should also suppose that the relevant surroundings of Twin Earth are identical to those of earth; it revolves around a star that appears to be exactly like our sun, and so on.) On Twin Earth there is a Twin equivalent of every person and thing here on Earth. The one difference between the two planets is that there is no water on Twin Earth. In its place there is a liquid that is superficially identical, but is chemically different, being composed not of H2O, but rather of some more complicated compound whose formula we abbreviate as ‘XYZ’. The Twin Earthlings who refer to their language as ‘English’ call XYZ ‘water’. Finally, we set the date of our thought experiment to be several centuries ago, when the residents of Earth and Twin Earth would have no means of knowing that the liquids they called ‘water’ were H2O and XYZ respectively. 
The experience of people on Earth with water, and that of those on Twin Earth with XYZ, would be identical.</dd></dl> <p>Now the question arises: when an earthling, say Oscar, and his twin on Twin Earth (also called 'Oscar' on his own planet, of course; indeed, the inhabitants of that planet necessarily call their own planet 'earth'. For convenience, we refer to this putative planet as 'Twin Earth', and extend this naming convention to the objects and people that inhabit it, in this case referring to Oscar's twin as Twin-Oscar, or Toscar.) say 'water', do they mean the same thing? Ex hypothesi, their brains are molecule-for-molecule identical. Yet, at least according to Putnam, when Oscar says 'water', the term refers to H2O, whereas when Toscar says 'water' it refers to XYZ. The result of this is that the contents of a person's brain are not sufficient to determine the reference of terms he uses, as one must also examine the causal history that led to his acquiring the term. (Oscar, for instance, learned the word 'water' in a world filled with H2O, whereas Toscar learned 'water' in a world filled with XYZ.) This is the essential thesis of semantic externalism. Putnam famously summarized this conclusion with the statement that "meaning just ain't in the head." </p><p>In terms of physical information systems such as occur in the brain, this philosophical argument means that if there are inadequate external filters available the information system will confuse XYZ with H2O; it will conclude that they are the same thing and have no difference in meaning. For the information system meaning is in the classification structures assigned by the system. If the system is provided with better transducers and filters then new meanings will arise within the system. However, for an information system 'meaning' is no more than a chain of relations because this is the nature of information (i.e.: arrangements of an arbitrary carrier). Other types of meaning would require phenomena other than simple information processing. </p><p>In Putnam's thought experiment the world can be different but the meaning for the individual is the same if the brain is the same. If there is a type of meaning other than a chain of relations would Putnam's experiment suggest that this type of 'meaning' occurs as a phenomenon in the brain or in the world beyond the body? </p><p>Putnam, H. (1975/1985) The meaning of 'meaning'. In Philosophical Papers, Vol. 2: Mind, Language and Reality. Cambridge University Press. </p> <div class="mw-heading mw-heading3"><h3 id="The_Inverted_Qualia_Argument">The Inverted Qualia Argument</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=9" title="Edit section: The Inverted Qualia Argument" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=9" title="Edit section's source code: The Inverted Qualia Argument"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The possibility that we may each experience different colours when confronted by a visual stimulus is well known and was discussed by John Locke. In particular the idea of <b>spectrum inversion</b>, in which the spectrum is exchanged, blue for red and so on, is often considered. 
It is then asked whether the subject of such an exchange would notice any difference. Unfortunately it turns out that colour is not solely due to the spectrum but depends on hue, saturation and lightness. If the colours are inverted, all the axes of colour would need to be exchanged, and the relations between the colours would indeed still be discernibly different. </p><p>Some philosophers have tried to avoid this difficulty by asking questions about qualia when the subject has no colour vision. For instance, it is asked whether a subject who saw things in black and white would see the world differently from one who saw the world in white and black. </p><p>This sort of discussion has been used as an attack on Behaviourism, where it is argued that, whether a tomato is seen as black or white, the subject's behaviour towards the tomato will be the same. So subjects can have mental states independent of behaviours. </p><p>Block (1990) has adapted this argument to an <i>inverted earth</i> scenario in which it is proposed that a subject goes to another planet which is identical to earth except for the inversion of visual qualia. He points out that behaviours would adjust to be the same on the inverted earth as on the actual earth. All functions would be identical but the mental state would be different so it is concluded that mental states are not processes. </p><p>Chalmers (1996) approaches this argument by assuming that the absent and fading qualia arguments have proven his idea of organisational invariance. He then introduces the idea that conscious experience only exists for the durationless instant and notes that, given these assumptions, a person would not be aware that the quale red had been switched for the quale blue. </p><p><span style="font-family:'times new roman';">"My experiences are switching from red to blue, but <i>I do not notice any change</i>. Even as we flip the switch a number of times and my qualia dance back and forth, I will simply go about my business, noticing nothing unusual."</span> </p><p>Block, N. (1990). Inverted Earth, Philosophical Perspectives, 4: 53–79. </p><p>See also: Block, N. Qualia. <a rel="nofollow" class="external free" href="http://www.nyu.edu/gsas/dept/philo/faculty/block/papers/qualiagregory.pdf">http://www.nyu.edu/gsas/dept/philo/faculty/block/papers/qualiagregory.pdf</a> Byrne, A. (2004). Inverted Qualia. Stanford Encyclopedia of Philosophy. <a rel="nofollow" class="external free" href="http://plato.stanford.edu/entries/qualia-inverted/">http://plato.stanford.edu/entries/qualia-inverted/</a> Shoemaker, S. (2002). Content, Character, and Color II: A Better Kind of Representationalism. Second Whitehead Lecture. 
<a rel="nofollow" class="external free" href="http://web.archive.org/20040306235426/humanities.ucsc.edu/NEH/shoemaker2.htm">http://web.archive.org/20040306235426/humanities.ucsc.edu/NEH/shoemaker2.htm</a> </p> <div class="mw-heading mw-heading3"><h3 id="The_Knowledge_Argument">The Knowledge Argument</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=10" title="Edit section: The Knowledge Argument" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=10" title="Edit section's source code: The Knowledge Argument"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Much of the philosophical literature about qualia has revolved around the debate between physicalism and non-physicalism. In 1982 Frank Jackson proposed the famous "Knowledge Argument" to highlight how physical knowledge might not be enough to describe phenomenal experience: </p><p><span style="font-family:'times new roman',serif;">"Mary is a brilliant scientist who is, for whatever reason, forced to investigate the world from a black and white room via a black and white television monitor. She specializes in the neurophysiology of vision and acquires, let us suppose, all the physical information there is to obtain about what goes on when we see ripe tomatoes, or the sky, and use terms like ‘red’, ‘blue’, and so on. She discovers, for example, just which wavelength combinations from the sky stimulate the retina, and exactly how this produces via the central nervous system the contraction of the vocal chords and expulsion of air from the lungs that results in the uttering of the sentence ‘The sky is blue’. (It can hardly be denied that it is in principle possible to obtain all this physical information from black and white television, otherwise the Open University would of necessity need to use color television.)</span> </p><p><span style="font-family:'times new roman',serif;">What will happen when Mary is released from her black and white room or is given a color television monitor? Will she learn anything or not? It seems just obvious that she will learn something about the world and our visual experience of it. But then it is inescapable that her previous knowledge was incomplete. But she had all the physical information. Ergo there is more to have than that, and Physicalism is false.</span> Jackson (1982). </p><p>The Knowledge argument is a category mistake because a description of the universe, such as information about science, is a set of symbols in a particular medium such as ink on paper. These symbols provide the recipe for experiments and other manipulations of nature, and predict the outcome of these manipulations. The manipulations of nature are not the same as the set of symbols describing how to perform these manipulations. Scientific information is not the world itself and the truth or falsehood of Physicalism is unaffected by the knowledge argument. 
</p><p>If the Knowledge Argument is interpreted as an argument about whether information about the nature of the colour red could ever be sufficient to provide the experience that we call red then it becomes more relevant to the problem of consciousness but it is then a debate about whether information processors could be conscious, this is covered below. Those interested in a full discussion of the Knowledge Argument should consult Alter (1998) and especially the link given with this reference. </p> <div class="mw-heading mw-heading2"><h2 id="The_problem_of_machine_and_digital_consciousness">The problem of machine and digital consciousness</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=11" title="Edit section: The problem of machine and digital consciousness" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=11" title="Edit section's source code: The problem of machine and digital consciousness"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading3"><h3 id="Information_processing_and_digital_computers">Information processing and digital computers</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=12" title="Edit section: Information processing and digital computers" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=12" title="Edit section's source code: Information processing and digital computers"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Information processing consists of encoding a state, such as the geometry of an image, on a carrier such as a stream of electrons, and then submitting this encoded state to a series of transformations specified by a set of instructions called a program. In principle the carrier could be anything, even steel balls or onions, and the machine that implements the instructions need not be electronic, it could be mechanical or fluidic. </p><p>Digital computers implement information processing. From the earliest days of digital computers people have suggested that these devices may one day be conscious. One of the earliest workers to consider this idea seriously was Alan Turing. Turing proposed the <a href="/wiki/Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness/Turing_Test" title="Consciousness Studies/The Philosophical Problem/Machine Consciousness/Turing Test">Turing Test</a> as a way of discovering whether a machine can think. In the Turing Test a group of people would ask a machine questions and if they could not tell the difference between the replies of the machine and the replies of a person it would be concluded that the machine could indeed think. Turing's proposal is often confused with the idea of a test for consciousness. However, phenomenal consciousness is an internal state so the best that such a test could demonstrate is that a digital computer could simulate consciousness. 
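</p><p>To illustrate the carrier-independence described at the start of this section, the following rough sketch (the encodings and the "program" are hypothetical) applies the same transformation to the same information carried first as voltages and then as an arrangement of steel balls.</p>
<pre>
# Sketch: the same "program" (a transformation of encoded states) applied to
# the same information carried on two different carriers.  The encoding and
# the rule are hypothetical.

def invert(state):
    """A trivial 'program': flip every element of the encoded state."""
    return [1 - bit for bit in state]

# Carrier 1: voltages, read as bits.
voltages = [0.0, 5.0, 5.0, 0.0]
bits_from_voltages = [1 if v > 2.5 else 0 for v in voltages]

# Carrier 2: steel balls in a row (ball present = 1, gap = 0).
balls = ["gap", "ball", "ball", "gap"]
bits_from_balls = [1 if b == "ball" else 0 for b in balls]

# The two carriers carry the same information, and the program treats them alike.
assert bits_from_voltages == bits_from_balls
print(invert(bits_from_voltages))   # -> [1, 0, 0, 1] on either carrier
</pre>
<p>In both cases the processor merely rearranges states of a carrier; whether those states mean anything is precisely the question raised by the arguments below.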
</p><p>If technologists were limited to the use of the principles of digital computing when creating a conscious entity, they would have the problems associated with the philosophy of 'strong' artificial intelligence. The term <i>strong AI</i> was defined by Searle: </p> <pre>...according to strong AI, the computer is not merely a tool in the study of the mind; rather, the appropriately programmed computer really is a mind (J. Searle in Minds, Brains and Programs. The Behavioral and Brain Sciences, vol. 3, 1980). </pre> <p>If a computer could demonstrate Strong AI it would not necessarily be more powerful at calculating or solving problems than a computer that demonstrated Weak AI. </p><p>The most serious problem with Strong AI is John Searle's "Chinese Room Argument" in which it is demonstrated that the contents of an information processor have no intrinsic meaning - at any moment they are just a set of electrons or steel balls etc. The argument is reproduced in full below: </p><p><span style="font-family:'times new roman',serif;">"One way to test any theory of the mind is to ask oneself what it would be like if my mind actually worked on the principles that the theory says all minds work on. Let us apply this test to the Schank program with the following Gedankenexperiment. Suppose that I’m locked in a room and given a large batch of Chinese writing. Suppose furthermore (as is indeed the case) that I know no Chinese, either written or spoken, and that I’m not even confident that I could recognize Chinese writing as Chinese writing distinct from, say, Japanese writing or meaningless squiggles. To me, Chinese writing is just so many meaningless squiggles. Now suppose further that after this first batch of Chinese writing I am given a second batch of Chinese script together with a set of rules for correlating the second batch with the first batch. The rules are in English, and I understand these rules as well as any other native speaker of English. They enable me to correlate one set of formal symbols with another set of formal symbols, and all that "formal" means here is that I can identify the symbols entirely by their shapes. Now suppose also that I am given a third batch of Chinese symbols together with some instructions, again in English, that enable me to correlate elements of this third batch with the first two batches, and these rules instruct me how to give back certain Chinese symbols with certain sorts of shapes in response to certain sorts of shapes given me in the third batch. Unknown to me, the people who are giving me all of these symbols call the first batch a "script," they call the second batch a "story," and they call the third batch "questions." Furthermore, they call the symbols I give them back in response to the third batch "answers to the questions," and the set of rules in English that they gave me, they call the "program." Now just to complicate the story a little, imagine that these people also give me stories in English, which I understand, and they then ask me questions in English about these stories, and I give them back answers in English. Suppose also that after a while I get so good at following the instructions for manipulating the Chinese symbols and the programmers get so good at writing the programs that from the external point of view—that is, from the point of view of somebody outside the room in which I am locked—my answers to the questions are absolutely indistinguishable from those of native Chinese speakers. 
Nobody just looking at my answers can tell that I don't speak a word of Chinese. Let us also suppose that my answers to the English questions are, as they no doubt would be, indistinguishable from those of other native English speakers, for the simple reason that I am a native English speaker. From the external point of view—from the point of view of someone reading my "answers"—the answers to the Chinese questions and the English questions are equally good. But in the Chinese case, unlike the English case, I produce the answers by manipulating uninterpreted formal symbols. As far as the Chinese is concerned, I simply behave like a computer; I perform computational operations on formally specified elements. For the purposes of the Chinese, I am simply an instantiation of the computer program."</span> </p><p>In other words, Searle is proposing that if a computer is just an arrangement of steel balls or electric charges then its content is meaningless without some other phenomenon. Block (1978) used the analogy of a system composed of the population of China communicating with each other to suggest the same idea, that an arrangement of identical things has no meaningful content without a conscious observer who understands its form. </p><p>Searle's objection does not convince Direct Realists because they would maintain that 'meaning' is only to be found in objects of perception. </p> <div class="mw-heading mw-heading3"><h3 id="The_meaning_of_meaning_and_the_Symbol_Grounding_Problem">The meaning of meaning and the Symbol Grounding Problem</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=13" title="Edit section: The meaning of meaning and the Symbol Grounding Problem" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=13" title="Edit section's source code: The meaning of meaning and the Symbol Grounding Problem"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In his Chinese Room Argument Searle shows that symbols on their own do not have any meaning. In other words, a computer that is a set of electrical charges or flowing steel balls is just a set of steel balls or electrical charges. Leibniz spotted this problem in the seventeenth century. </p><p>Searle's argument is also, partly, the <b>Symbol Grounding Problem</b>; Harnad (2001) defines this as: </p><p><span style="font-family:'times new roman',serif;">"the symbol grounding problem concerns how the meanings of the symbols in a system can be grounded (in something other than just more ungrounded symbols) so they can have meaning independently of any external interpreter."</span> </p><p>Harnad defines a Total Turing Test in which a robot connected to the world by sensors and actions might be judged to be indistinguishable from a human being. He considers that a robot that passed such a test would overcome the symbol grounding problem. Unfortunately Harnad does not tackle Leibniz's misgivings about the internal state of the robot being just a set of symbols (cogs and wheels/charges etc.). 
The Total Turing Test is also doubtful if analysed in terms of information systems alone. For instance, Powers (2001) argues that an information system could be grounded in Harnad's sense if it were embedded in a virtual reality rather than the world around it. </p><p>So what is "meaning" in an information system? In information systems a <b>relation</b> is defined in terms of which thing contains another. Once it is established that one thing contains another, the contained thing is called an <b>attribute</b>. A car contains seats so seats are an attribute of cars. Cars are sometimes red so cars sometimes have the attribute "red". This containing of one thing by another leads to classification hierarchies known as a relational database. What Harnad was seeking to achieve was a connection between items in the database and items in the world outside the database. This did not succeed in giving "meaning" to the signals within the machine - they were still a set of separate signals in a materialist model universe. </p><p>Aristotle and Plato had a clear idea of meaning when they proposed that ideas depend upon internal images or forms. Plato, in particular, conceived that understanding is due to the forms in phenomenal consciousness. Bringing this view up to date, this implies that the way one form contains another gives us understanding. The form of a car contains the form we call seats etc. Even things that we consider to be "content" rather than "form", such as redness, require an extension in space so that there is a red area rather than red by itself (cf. Hume 1739). So if the empiricists are correct, our minds contain a geometrical classification system ("what contains what") or geometrical relational database. </p><p>A geometrical database has advantages over a sequential database because items within it are highly classified (their relations to other items being implicit in the geometry) and can also be easily related to the physical position of the organism in the world. It would appear that the way forward for artificial consciousness would be to create a virtual reality within the machine. Perhaps the brain works in this fashion and dreams, imagination and hallucinations are evidence for this. In Part III the storage of geometrically related information in the "Place" area of the brain is described. But although this would be closer to our experience it still leaves us with the Hard Problem of how the state of a model could become conscious experience. </p> <ul><li>Harnad, S. (2001). Grounding Symbols in the Analog World With Neural Nets—a Hybrid Model, Psycoloquy: 12,#34 <a rel="nofollow" class="external free" href="http://psycprints.ecs.soton.ac.uk/archive/00000163/#html">http://psycprints.ecs.soton.ac.uk/archive/00000163/#html</a></li> <li>Powers, D.M.W. 
(2001) A Grounding of Definition, Psycoloquy: 12,#56 <a rel="nofollow" class="external free" href="http://psycprints.ecs.soton.ac.uk/archive/00000185/#html">http://psycprints.ecs.soton.ac.uk/archive/00000185/#html</a></li></ul> <div class="mw-heading mw-heading3"><h3 id="Artificial_consciousness_beyond_information_processing">Artificial consciousness beyond information processing</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=14" title="Edit section: Artificial consciousness beyond information processing" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=14" title="Edit section's source code: Artificial consciousness beyond information processing"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The debate about whether a machine could be conscious under any circumstances is usually described as the conflict between physicalism and dualism. Dualists believe that there is something non-physical about consciousness whilst physicalists hold that all things are physical. </p><p>Physicalists are not limited to those who hold that consciousness is a property of encoded information on carrier signals. Several indirect realist philosophers and scientists have proposed that, although information processing might deliver the content of consciousness, the state that is consciousness is due to some other physical phenomenon. The eminent neurologist Wilder Penfield was of this opinion and scientists such as Arthur Stanley Eddington, Roger Penrose, Herman Weyl, Karl Pribram and Henry Stapp amongst many others have also proposed that consciousness involves physical phenomena subtler than information processing. Even some of the most ardent supporters of consciousness in information processors such as Dennett suggest that some new, emergent, scientific theory may be required to account for consciousness. </p><p>As was mentioned above, neither the ideas that involve direct perception nor those that involve models of the world in the brain seem to be compatible with current physical theory. It seems that new physical theory may be required and the possibility of dualism is not, as yet, ruled out. 
</p> <div class="mw-heading mw-heading3"><h3 id="The_Computability_Problem_and_Halting_of_Turing_Machines">The Computability Problem and Halting of Turing Machines</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=15" title="Edit section: The Computability Problem and Halting of Turing Machines" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=15" title="Edit section's source code: The Computability Problem and Halting of Turing Machines"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading4"><h4 id="The_Church-Turing_thesis">The Church-Turing thesis</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=16" title="Edit section: The Church-Turing thesis" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=16" title="Edit section's source code: The Church-Turing thesis"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In computability theory the Church–Turing thesis, Church's thesis, Church's conjecture or Turing's thesis, named after Alonzo Church and Alan Turing, is a hypothesis about the nature of mechanical calculation devices, such as electronic computers. The thesis claims that any calculation that is possible can be performed by an algorithm running on a computer, provided that sufficient time and storage space are available. </p><p>This thesis, coupled with the proposition that all computers can be modelled by Turing Machines, means that Functionalist theories of consciousness are equivalent to the hypothesis that the brain operates as a Turing Machine. </p> <div class="mw-heading mw-heading4"><h4 id="Turing_machines">Turing machines</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&veaction=edit&section=17" title="Edit section: Turing machines" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Consciousness_Studies/The_Philosophical_Problem/Machine_Consciousness&action=edit&section=17" title="Edit section's source code: Turing machines"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>A <a class="external text" href="https://en.wikipedia.org/wiki/Turing_machine">Turing Machine</a> is a pushdown automaton made more powerful by relaxing the last-in-first-out requirement of its stack. (Interestingly, this seemingly minor relaxation enables the Turing machine to perform such a wide variety of computations that it can serve as a model for the computational capabilities of all modern computer software.) </p><p>A Turing machine can be constructed using a single tape. 
<p>A Turing machine consists of: </p> <ol><li>A <i>tape</i> which is divided into cells, one next to the other. Each cell contains a symbol from some finite alphabet. The alphabet contains a special <i>blank</i> symbol (here written as '0') and one or more other symbols. The tape is assumed to be arbitrarily extendible to the left and to the right, i.e., the Turing machine is always supplied with as much tape as it needs for its computation. Cells that have not been written to before are assumed to be filled with the blank symbol.</li> <li>A <i>head</i> that can read and write symbols on the tape and move left and right.</li> <li>A <i>state register</i> that stores the state of the Turing machine. The number of different states is always finite and there is one special <i>start state</i> with which the state register is initialized.</li> <li>An <i>action table</i> (or <i>transition function</i>) that tells the machine what symbol to write, how to move the head ('L' for one step left, and 'R' for one step right) and what its new state will be, given the symbol it has just read on the tape and the state it is currently in. If there is no entry in the table for the current combination of symbol and state then the machine will halt.</li></ol> <p>Note that every part of the machine is finite; it is the potentially unlimited amount of tape that gives it an unbounded amount of storage space. A minimal simulation of such a machine is sketched below. </p>
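<p>The following sketch shows how small these ingredients are when written as code. It is a minimal, illustrative simulator (in Python; the example machine, a unary incrementer, and all of the names are invented for this illustration rather than taken from any particular source): a dictionary stands in for the tape, a string for the state register, and a dictionary keyed on (state, symbol) pairs for the action table. </p>
<pre>
from collections import defaultdict

def run_turing_machine(action_table, tape_input, start_state, max_steps=10_000):
    """Run a Turing machine until it halts (no table entry) or max_steps is reached."""
    # The tape: cells that have never been written read as the blank symbol '0'.
    tape = defaultdict(lambda: "0", enumerate(tape_input))
    head = 0
    state = start_state
    for _ in range(max_steps):
        key = (state, tape[head])
        if key not in action_table:          # no entry for this (state, symbol): halt
            break
        write, move, state = action_table[key]
        tape[head] = write
        head += 1 if move == "R" else -1
    return "".join(tape[i] for i in range(min(tape), max(tape) + 1))

# Example machine (invented for illustration): append a '1' to a block of 1s.
# It scans right over 1s and writes a 1 on the first blank cell it finds, then halts.
increment = {
    ("scan", "1"): ("1", "R", "scan"),
    ("scan", "0"): ("1", "R", "done"),   # 'done' has no entries, so the machine halts
}

print(run_turing_machine(increment, "111", "scan"))   # 11110: four 1s and the blank read last
</pre>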
<p>Another problem that arises with Turing machines is that some problems can be shown to be undecidable, so that a machine set to work on them will never halt. </p> <h4 id="The_halting_problem">The halting problem</h4> <p>The proof of the halting problem proceeds by reductio ad absurdum. We will assume that there is an algorithm described by the function <code>halt(a, i)</code> that decides if the algorithm encoded by the string <i>a</i> will halt when given as input the string <i>i</i>, and then show that this leads to a contradiction. </p><p>We start by assuming that there is a function <code>halt(a, i)</code> that returns <code><b>true</b></code> if the algorithm represented by the string <i>a</i> halts when given as input the string <i>i</i>, and returns <code><b>false</b></code> otherwise. (Every possible algorithm can be encoded as a string in this way; the existence of the universal Turing machine shows that a single machine can read such a string and run the algorithm it describes.) Given this algorithm we can construct another algorithm <code>trouble(s)</code> as follows: </p>
<pre>
 <b>function</b> trouble(<i>string</i> s)
     <b>if</b> halt(s, s) = <b>false</b>
         <b>return</b> <b>true</b>
     <b>else</b>
         loop forever
</pre>
<p>This algorithm takes a string <i>s</i> as its argument and runs the algorithm <code>halt</code>, giving it <i>s</i> both as the description of the algorithm to check and as the initial data to feed to that algorithm. If <code>halt</code> returns <code><b>false</b></code>, then <code>trouble</code> returns <code><b>true</b></code>; otherwise <code>trouble</code> goes into an infinite loop. Since all algorithms can be represented by strings, there is a string <i>t</i> that represents the algorithm <code>trouble</code>. We can now ask the following question: </p> <dl><dd>Does <code>trouble(t)</code> halt?</dd></dl> <p>Let us consider both possible cases: </p> <ol><li>Assume that <code>trouble(t)</code> halts. The only way this can happen is that <code>halt(t, t)</code> returns <code><b>false</b></code>, but that in turn indicates that <code>trouble(t)</code> does not halt. Contradiction.</li> <li>Assume that <code>trouble(t)</code> does not halt. Since <code>halt</code> always halts, this can only happen when <code>trouble</code> goes into its infinite loop. This means that <code>halt(t, t)</code> must have returned <code><b>true</b></code>, since <code>trouble</code> would have returned immediately if it had returned <code><b>false</b></code>. But that in turn would mean that <code>trouble(t)</code> does halt. Contradiction.</li></ol> <p>Since both cases lead to a contradiction, the initial assumption that the algorithm <code>halt</code> exists must be false. </p><p>This classic proof is typically referred to as the <b>diagonalization proof</b>, so called because if one imagines a grid containing all the values of <code>halt(a, i)</code>, with every possible <i>a</i> value given its own row, and every possible <i>i</i> value given its own column, then the values of <code>halt(s, s)</code> are arranged along the main diagonal of this grid. The proof can be framed in the form of the question: which row of the grid corresponds to the string <i>t</i>? The answer is that the <code>trouble</code> function is devised so that its halting behaviour differs from every row <i>a</i> already in the grid in at least one position: the diagonal entry, where the input is <i>a</i> itself and <code>trouble</code> does the opposite of what <code>halt(a, a)</code> reports. No row of the grid can therefore correspond to <i>t</i>, which contradicts the requirement that the grid contains a row for every possible <i>a</i> value, and so constitutes a proof by contradiction that the halting problem is undecidable. The diagonal trick itself is illustrated in miniature below. </p>
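<p>The diagonal construction can be seen in miniature with an ordinary finite table of predicates. This is only a sketch, not part of the proof: the predicates and their names below are invented for illustration, and a finite table merely stands in for the infinite grid of <code>halt(a, i)</code> values. Flipping the diagonal produces a predicate guaranteed to differ from every row of the table at that row's own name, just as <code>trouble</code> differs from every candidate row of the halting grid. </p>
<pre>
# A small table of string predicates standing in for rows of the grid halt(a, i):
# each row is indexed by a name and can be applied to any name as input.
rows = {
    "even_length": lambda s: len(s) % 2 == 0,
    "starts_with_e": lambda s: s.startswith("e"),
    "contains_x": lambda s: "x" in s,
}

# The "diagonal" entries: each predicate applied to its own name.
diagonal = {name: pred(name) for name, pred in rows.items()}

# Flip the diagonal: a new predicate that, on each name, does the opposite of
# the row labelled with that name.
def anti_diagonal(name):
    return not rows[name](name)

# The new predicate differs from every row at that row's own name, so it cannot
# be identical to any row already in the table -- the heart of the diagonal argument.
for name, pred in rows.items():
    assert anti_diagonal(name) != pred(name)

print(diagonal)   # {'even_length': False, 'starts_with_e': False, 'contains_x': True}
</pre>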
<h3 id="The_simulation_argument">The simulation argument</h3> <p>According to this argument (Bostrom 2003) the universe could be a giant computer simulation that contains people as well as objects. Bostrom seems to believe that at any instant a collection of bits of information, such as electrons on silicon or specks of dust on a sheet, could be conscious. He states that: </p><p>"A common assumption in the philosophy of mind is that of substrate-independence. The idea is that mental states can supervene on any of a broad class of physical substrates. Provided a system implements the right sort of computational structures and processes, it can be associated with conscious experiences." </p><p>He then goes on to argue that, because of this assumption, human beings could be simulations in a computer. Unfortunately, without tackling the problem of how a pattern of dust at an instant could be a person with 'conscious experience', the simulation argument is flawed. In fact, even a person made of a moving pattern of dust over several instants is problematic without the assumptions of naive realism or dualism. Bostrom puts 'mental states' beyond physical explanation (i.e. he simply assumes that conscious mental states could exist in a pattern of electrons, dust, steel balls etc.). In view of this dualism, Bostrom's argument reduces to the proposal that the world is a digital simulation together with something else that is required to endow the simulated people in the world with consciousness. </p> <h2 id="Notes_and_References">Notes and References</h2> <p>Note 1: Strictly this is the quantum 'amplitude' for the electron to go in a particular direction rather than the probability. </p><p><b>The philosophical problem</b> </p> <ul><li>Chalmers, D. (1996). The Conscious Mind. New York: Oxford University Press.</li></ul> <p><b>Epiphenomenalism and the problem of change</b> </p> <ul><li>Huxley, T. H. (1874). On the Hypothesis that Animals are Automata, and its History. The Fortnightly Review: 16:555-580.</li></ul> <p><b>The Problem of Time</b> </p> <ul><li>Atmanspacher, H. (1989). The aspect of information production in the process of observation, in: Foundations of Physics, vol. 19, 1989, pp. 553–77</li> <li>Atmanspacher, H. (2000). Ontic and epistemic descriptions of chaotic systems. In Proceedings of CASYS 99, ed. by D. Dubois, Springer, Berlin 2000, pp. 465–478. <a rel="nofollow" class="external free" href="http://www.igpp.de/english/tda/pdf/liege.pdf">http://www.igpp.de/english/tda/pdf/liege.pdf</a></li> <li>de Broglie, L. (1925) On the theory of quanta. A translation of: Recherches sur la théorie des quanta (Ann. de Phys., 10e série, t. III, Janvier–Février 1925), by A. F. Kracklauer. <a rel="nofollow" class="external free" href="http://www.nonloco-physics.000freehosting.com/ldb_the.pdf">http://www.nonloco-physics.000freehosting.com/ldb_the.pdf</a></li> <li>Brown, K. (????) Mathpages 3.7 Zeno and the Paradox of Motion. 
<a rel="nofollow" class="external free" href="http://www.mathpages.com/rr/s3-07/3-07.htm">http://www.mathpages.com/rr/s3-07/3-07.htm</a></li> <li>Brown, K. (????) Mathpages Zeno and Uncertainty. <a rel="nofollow" class="external free" href="http://www.mathpages.com/home/kmath158.htm">http://www.mathpages.com/home/kmath158.htm</a></li> <li>Franck, G. (1994). Published in: Harald Atmanspacher and Gerhard J. Dalenoort (eds.), Inside Versus Outside. Endo- and Exo-Concepts of Observation and Knowledge in Physics, Philosophy, and Cognitive Science, Berlin: Springer,1994, pp. 63–83 <a rel="nofollow" class="external free" href="http://www.iemar.tuwien.ac.at/publications/GF_1994a.pdf">http://www.iemar.tuwien.ac.at/publications/GF_1994a.pdf</a></li> <li>Lynds, P. (2003). Time and Classical and Quantum Mechanics: Indeterminacy vs. Discontinuity. Foundations of Physics Letters, 16(4), 2003. <a rel="nofollow" class="external free" href="http://doc.cern.ch//archive/electronic/other/ext/ext-2003-042.pdf">http://doc.cern.ch//archive/electronic/other/ext/ext-2003-042.pdf</a></li> <li>McCall, S. 2000. QM and STR. The combining of quantum mechanics and</li></ul> <p>relativity theory. Philosophy of Science 67 (Proceedings), pp. S535-S548. <a rel="nofollow" class="external free" href="http://www.mcgill.ca/philosophy/faculty/mccall/">http://www.mcgill.ca/philosophy/faculty/mccall/</a> </p> <ul><li>McTaggart, J.M.E. (1908) The Unreality of Time. Published in Mind: A Quarterly Review of Psychology and Philosophy 17 (1908): 456–473. <a rel="nofollow" class="external free" href="http://www.ditext.com/mctaggart/time.html">http://www.ditext.com/mctaggart/time.html</a></li> <li>Petkov, V. (2002). Montreal Inter-University Seminar on the History and Philosophy of Science. <a rel="nofollow" class="external free" href="http://alcor.concordia.ca/~vpetkov/absolute.html">http://alcor.concordia.ca/~vpetkov/absolute.html</a></li> <li>Pollock, S. (2004) Physics 2170 - Notes for section 4. University of Colorado. <a rel="nofollow" class="external free" href="http://www.colorado.edu/physics/phys2170/phys2170_spring96/notes/2170_notes4_18.html">http://www.colorado.edu/physics/phys2170/phys2170_spring96/notes/2170_notes4_18.html</a></li> <li>Weyl, H. (1920). Space, Time, Matter.(Dover Edition).</li></ul> <p>Further reading: </p> <ul><li><a rel="nofollow" class="external text" href="http://psychclassics.yorku.ca/James/Principles/prin15.htm">James, W. (1890). The Principles of Psychology. CHAPTER XV. THE PERCEPTION OF TIME.</a></li> <li><a rel="nofollow" class="external text" href="http://www.ditext.com/mctaggart/time.html">Ellis McTaggart, J.M. (1908) The Unreality of Time. Mind: A Quarterly Review of Psychology and Philosophy 17 (1908): 456-473.</a></li> <li><a rel="nofollow" class="external text" href="http://web.archive.org/20050314020456/www.geocities.com/trolleylauncher/AJPPresentismConsciousnessFinalVersion.htm">McKinnon, N.(2003)Presentism and Consciousness. Australasian Journal of Philosophy 81:3 (2003), 305-323.</a></li> <li><a rel="nofollow" class="external text" href="http://www.bu.edu/wcp/Papers/Meta/MetaLamb.htm">Lamb, A.W. (1998) Granting Time Its Passage. Twentieth World Congress of Philosophy Boston, Massachusetts U.S.A. 10-15 August 1998</a></li> <li><a rel="nofollow" class="external text" href="http://www.iemar.tuwien.ac.at/publications/GF_1994a.pdf">Franck, G. (1994). Physical Time and Intrinsic Temporality. Published in: Harald Atmanspacher and Gerhard J. Dalenoort (eds.), Inside Versus Outside. 
Endo- and Exo-Concepts of Observation and Knowledge in Physics, Philosophy, and Cognitive Science, Berlin: Springer,1994, pp. 63-83</a></li> <li><a rel="nofollow" class="external text" href="http://cogprints.org/3125/01/Subjective_Perception_of_Time_and_a_Progressive_Present_Moment_-_The_Neurobiological_Key_to_Unlocking_Consciousness.pdf">Lynds, P. (2003). Subjective Perception of Time and a Progressive Present Moment: The Neurobiological Key to Unlocking Consciousness.</a></li> <li><a rel="nofollow" class="external text" href="http://www.brocku.ca/MeadProject/Whitehead/Whitehead_1920/White1_pref.html">Alfred North Whitehead. (1920) "Time". Chapter 3 in The Concept of Nature. Cambridge: Cambridge University Press (1920): 49-73.</a></li> <li><a rel="nofollow" class="external text" href="http://web.archive.org/web/20050530131441/http://www.iemar.tuwien.ac.at/publications/GF_2003c.pdf">Franck, G. HOW TIME PASSES. On Conceiving Time as a Process. Published in : R. Buccheri/ M. Saniga/ W.M. Stuckey (eds.), The Nature of Time: Geometry, Physics and Perception (NATO Science Series), Dodrecht: Kluwer Academic, 2003, pp. 91-103</a></li> <li><a rel="nofollow" class="external text" href="http://www.umkc.edu/scistud/psa98/papers/savitt.pdf">Savitt, S.F. (1998). There's no time like the present (in Minkowski space-time).</a></li> <li><a rel="nofollow" class="external text" href="http://plato.stanford.edu/entries/time-experience/">Le Poidevin, R. (2004) The Experience and Perception of Time. Stanford Encyclopedia of Philosophy.</a></li> <li>Norton, J. (2004) The Hole Argument. Stanford Encyclopedia of Philosophy. <a rel="nofollow" class="external free" href="http://plato.stanford.edu/entries/spacetime-holearg/index.html">http://plato.stanford.edu/entries/spacetime-holearg/index.html</a></li> <li>Rovelli, C. (2003) Quantum Gravity. Book. <a rel="nofollow" class="external free" href="http://www.cpt.univ-mrs.fr/~rovelli/book.pdf">http://www.cpt.univ-mrs.fr/~rovelli/book.pdf</a></li> <li>Penrose, R. 1989. The Emperor's New Mind: Concerning Computers, Minds, and Laws of Physics. New York and Oxford: Oxford University Press</li> <li>Stein, H. 1968. On Einstein-Minkowski Space-Time, The Journal of Philosophy 65: 5-23.</li> <li>Torretti, R. 1983. Relativity and Geometry. Oxford, New York, Toronto, Sydney, Paris, Frankfurt: Pergamon Press.</li></ul> <p><b>The existence of time</b> </p> <ul><li>Clay, ER (1882). The Alternative: A Study in Psychology, p. 167. (Quoted in James 1890).</li> <li>Gombrich, Ernst (1964) 'Moment and Movement in Art', Journal of the Warburg and Courtauld Institutes XXVII, 293–306. Quoted in Le Poidevin, R. (2000). The Experience and Perception of Time. Stanford Encyclopedia of Philosophy. <a rel="nofollow" class="external free" href="http://plato.stanford.edu/entries/time-experience">http://plato.stanford.edu/entries/time-experience</a></li> <li>James, W. (1890) .The Principles of Psychology <a rel="nofollow" class="external free" href="http://psychclassics.yorku.ca/James/Principles/prin15.htm">http://psychclassics.yorku.ca/James/Principles/prin15.htm</a></li> <li>Lindner <i>et al.</i> (2005) Attosecond double-slit experiment. Accepted for Physical Review Letters. <a rel="nofollow" class="external free" href="http://arxiv.org/abs/quant-ph/0503165">http://arxiv.org/abs/quant-ph/0503165</a></li> <li>Paulus, GG et al. 
(2003) PRL 91, 253004 (2003), <a rel="nofollow" class="external free" href="http://web.archive.org/web/20040702094913/http://mste.laser.physik.uni-muenchen.de/paulus.pdf">http://web.archive.org/web/20040702094913/http://mste.laser.physik.uni-muenchen.de/paulus.pdf</a></li> <li>Physics Web. New look for classic experiment. <a rel="nofollow" class="external free" href="http://physicsweb.org/articles/news/9/3/1/1?rss=2.0">http://physicsweb.org/articles/news/9/3/1/1?rss=2.0</a></li> <li>Rea, MC. (2004). Four Dimensionalism. The Oxford Handbook for Metaphysics <a rel="nofollow" class="external free" href="http://web.archive.org/web/20040407104606/http://www.nd.edu/~mrea/Online%20Papers/Four%20Dimensionalism.pdf">http://web.archive.org/web/20040407104606/http://www.nd.edu/~mrea/Online%20Papers/Four%20Dimensionalism.pdf</a></li> <li>Romer, H. (2004) Weak Quantum Theory and the Emergence of Time <a rel="nofollow" class="external free" href="http://arxiv.org/PS_cache/quant-ph/pdf/0402/0402011.pdf">http://arxiv.org/PS_cache/quant-ph/pdf/0402/0402011.pdf</a></li> <li>Ambjørn, J. <i>et al.</i> (2004). Emergence of a 4D world from causal quantum gravity. Phys.Rev.Lett. 93 (2004) 131301 <a rel="nofollow" class="external free" href="http://www.arxiv.org/PS_cache/hep-th/pdf/0404/0404156.pdf">http://www.arxiv.org/PS_cache/hep-th/pdf/0404/0404156.pdf</a></li></ul> <p>Useful Links </p> <ul><li>The web site of Dr Paulus, one of the principal physicists working on these femtosecond laser projects. <a rel="nofollow" class="external free" href="http://faculty.physics.tamu.edu/ggp/">http://faculty.physics.tamu.edu/ggp/</a></li></ul> <p><b>Relationalism, Substantivalism etc.</b> </p> <ul><li>Earman, J. (2002). Thoroughly Modern McTaggart. Philosophers’ Imprint. Vol. 2 No. 3. August 2002. <a rel="nofollow" class="external free" href="http://www.umich.edu/~philos/Imprint/frameset.html?002003+28+pdf">http://www.umich.edu/~philos/Imprint/frameset.html?002003+28+pdf</a></li> <li>Einstein, A. (1916b). Die Grundlage der allgemeinen Relativitätstheorie, Annalen der Physik 49, 769 (1916); translated by W. Perrett and G. B. Jeffery as The Foundations of the General Theory of Relativity, in The Principle of Relativity (Dover, New York, 1952), pp. 117–118. Pointed out by Lusanna and Pauri in their draft of "General Covariance and the Objectivity of Space-Time Point Events".</li> <li>Gardner, M. (1990). The New Ambidextrous Universe: Symmetry and Asymmetry, from Mirror Reflections to Superstrings. WH Freeman & Co. New York.</li> <li>Gaul, M. & Rovelli, C. (1999). Loop Quantum Gravity and the Meaning of Diffeomorphism Invariance. <a rel="nofollow" class="external free" href="http://arxiv.org/PS_cache/gr-qc/pdf/9910/9910079.pdf">http://arxiv.org/PS_cache/gr-qc/pdf/9910/9910079.pdf</a></li> <li>MacDonald, A. (2001). Einstein's Hole Argument. Am. J. Phys. 69, 223-225 (2001). <a rel="nofollow" class="external free" href="http://faculty.luther.edu/~macdonal/HoleArgument.pdf">http://faculty.luther.edu/~macdonal/HoleArgument.pdf</a></li> <li>Norton, J.D. (1993). General covariance and the foundations of general relativity: eight decades of dispute. Rep. Prog. Phys. 56 (1993) 791–858. <a rel="nofollow" class="external free" href="http://www.pitt.edu/~jdnorton/papers/decades.pdf">http://www.pitt.edu/~jdnorton/papers/decades.pdf</a></li> <li>Norton, J.D. (1999) A Conjecture on Einstein, the Independent Reality of Spacetime Coordinate Systems and the Disaster of 1913. 
<a rel="nofollow" class="external free" href="http://philoscience.unibe.ch/lehre/sommer05/Einstein%201905/Texte/113">http://philoscience.unibe.ch/lehre/sommer05/Einstein%201905/Texte/113</a></li> <li>Pooley, O. (2002). Handedness, parity violation,To appear in Katherine Brading and Elena Castellani (eds), in preparation, Symmetries in Physics: Philosophical Reflections (Cambridge: Cambridge University Press). <a rel="nofollow" class="external free" href="http://web.archive.org/web/20030624084411/http://users.ox.ac.uk/~ball0402/papers/parity.pdf">http://web.archive.org/web/20030624084411/http://users.ox.ac.uk/~ball0402/papers/parity.pdf</a></li></ul> <p><b>Quantum theory and time</b> </p> <ul><li>Hagan, S., Hammeroff, S.R. and Tuszynski, J.A.(2002). Quantum computation in brain microtubules: Decoherence and biological feasibility. PHYSICAL REVIEW E, VOLUME 65, 061901. <a rel="nofollow" class="external free" href="http://arxiv.org/abs/quant-ph/0005025">http://arxiv.org/abs/quant-ph/0005025</a></li> <li>Hawking, S. (1999) The future of quantum cosmology. <a rel="nofollow" class="external free" href="http://web.archive.org/web/20030311142458/http://www.hawking.org.uk/ps/futquan.ps">http://web.archive.org/web/20030311142458/http://www.hawking.org.uk/ps/futquan.ps</a></li> <li>Isham, C.J. (1993). Canonical quantum gravity and the problem of time.In Integrable Systems, Quantum Groups, and Quantum Field Theories, pages 157–288. Kluwer Academic Publishers, London, 1993. <a rel="nofollow" class="external free" href="http://arxiv.org/PS_cache/gr-qc/pdf/9210/9210011.pdf">http://arxiv.org/PS_cache/gr-qc/pdf/9210/9210011.pdf</a></li> <li>Isham, C.J. Structural Issues in Quantum Gravity. <a rel="nofollow" class="external free" href="http://lanl.arxiv.org/PS_cache/gr-qc/pdf/9510/9510063.pdf">http://lanl.arxiv.org/PS_cache/gr-qc/pdf/9510/9510063.pdf</a></li> <li>Jacobson, T. (1995). Thermodynamics of Spacetime: The Einstein Equation of State. Phys.Rev.Lett. 75 (1995) 1260-1263 <a rel="nofollow" class="external free" href="http://lanl.arxiv.org/PS_cache/gr-qc/pdf/9504/9504004.pdf">http://lanl.arxiv.org/PS_cache/gr-qc/pdf/9504/9504004.pdf</a></li> <li>Tegmark, M. (2000). The Importance of Quantum Decoherence in Brain Processes. Phys.Rev. E61 (2000) 4194-4206 <a rel="nofollow" class="external free" href="http://arxiv.org/PS_cache/quant-ph/pdf/9907/9907009.pdf">http://arxiv.org/PS_cache/quant-ph/pdf/9907/9907009.pdf</a></li> <li>Zeh, D. (2001) The Physical basis of the direction of time. Fourth edition <a href="/wiki/Special:BookSources/3-540-42081-9" title="Special:BookSources/3-540-42081-9">ISBN 3-540-42081-9</a> )- Springer-Verlag <a rel="nofollow" class="external free" href="http://www.rzuser.uni-heidelberg.de/~as3/time-direction/">http://www.rzuser.uni-heidelberg.de/~as3/time-direction/</a></li></ul> <p><b>The problem of qualia</b> </p> <ul><li>Alter, T. (1998). "A Limited Defense of the Knowledge Argument." Philosophical Studies 90: 35–56. But especially the discussion at the following web site: <a rel="nofollow" class="external free" href="http://host.uniroma3.it/progetti/kant/field/ka.html">http://host.uniroma3.it/progetti/kant/field/ka.html</a></li> <li>Anglin, J.R. & Zurek, J.H. (1996). Decoherence of quantum fields: decoherence and predictability. Phys.Rev. D53 (1996) 7327-7335 <a rel="nofollow" class="external free" href="http://arxiv.org/PS_cache/quant-ph/pdf/9510/9510021.pdf">http://arxiv.org/PS_cache/quant-ph/pdf/9510/9510021.pdf</a></li> <li>Bacciagaluppi, G. (2004). 
The role of decoherence in quantum theory. <a rel="nofollow" class="external free" href="http://plato.stanford.edu/entries/qm-decoherence/">http://plato.stanford.edu/entries/qm-decoherence/</a></li> <li>Dennett, D. (1991), Consciousness Explained, Boston: Little Brown and Company</li> <li>Dretske, F. (2003). Experience as Representation. Philosophical Issues 13, 67–82. <a rel="nofollow" class="external free" href="http://web.archive.org/20021124185049/humanities.ucsc.edu/NEH/dretske1.htm">http://web.archive.org/20021124185049/humanities.ucsc.edu/NEH/dretske1.htm</a></li> <li>Jackson, F. (1982) Epiphenomenal Qualia. Philosophical Quarterly, 32 (1982), pp. 127–36. <a rel="nofollow" class="external free" href="http://instruct.westvalley.edu/lafave/epiphenomenal_qualia.html">http://instruct.westvalley.edu/lafave/epiphenomenal_qualia.html</a></li> <li>Lehar, S. (2003) Gestalt Isomorphism and the Primacy of the Subjective Conscious Experience: A Gestalt Bubble Model. (2003) Behavioral & Brain Sciences 26(4), 375–444. <a rel="nofollow" class="external free" href="http://cns-alumni.bu.edu/~slehar/webstuff/bubw3/bubw3.html">http://cns-alumni.bu.edu/~slehar/webstuff/bubw3/bubw3.html</a></li> <li>Levine, J. (1983) “Materialism and Qualia: The Explanatory Gap”, Pacific. Philosophical Quarterly, 64: 354–61.</li> <li>Lycan, W. (1987). Consciousness, Cambridge, Mass : The MIT Press.</li> <li>Ogborn, J. & Taylor, E.F. (2005) Quantum physics explains Newton's laws of motion. Physics Education 40(1). 26–34. <a rel="nofollow" class="external free" href="http://www.eftaylor.com/pub/OgbornTaylor.pdf">http://www.eftaylor.com/pub/OgbornTaylor.pdf</a></li> <li>Strawson, G. (1994). Mental Reality, Cambridge USA: the MIT Press, Bradford Books.</li> <li>Tye, M. (1995). Ten Problems of Consciousness (Bradley Books, MIT Press),</li> <li>Tye, M. (2003). Visual qualia and visual content revisited. Ed. David Chalmers. OUP. <a rel="nofollow" class="external free" href="http://sun.soci.niu.edu/~phildept/MT/Visual.pdf">http://sun.soci.niu.edu/~phildept/MT/Visual.pdf</a></li> <li>Tye, M. (2003). Qualia. Stanford Encyclopedia of Philosophy. <a rel="nofollow" class="external free" href="http://plato.stanford.edu/entries/qualia/">http://plato.stanford.edu/entries/qualia/</a></li> <li>Zurek, W.H. (2003). Decoherence, einselection and the quantum origins of the classical. Rev. Mod. Phys. 75, 715 (2003) <a rel="nofollow" class="external free" href="http://arxiv.org/PS_cache/quant-ph/pdf/0105/0105127.pdf">http://arxiv.org/PS_cache/quant-ph/pdf/0105/0105127.pdf</a></li></ul> <p><b>Machine and digital consciousness</b> </p> <ul><li>Block, N. (1978). "Trouble with functionalism", In W. Savage (ed.),Perception and Cognition: Minnesota Studies in Philosophy of Science, Vol IX, Minnesota University Press, 1978, pp. 261–362; reprinted in Block (ed.) (1980), vol. I, pp. 268–305; reprinted (excerpt) in Lycan (ed.)(1990), pp. 444–468.</li> <li>Sternberg, E. (2007). <i>Are You a Machine? The Brain, the Mind and What it Means to be Human,</i> Prometheus Books.</li> <li><a rel="nofollow" class="external text" href="http://web.archive.org/20000823030455/members.aol.com/NeoNoetics/MindsBrainsPrograms.html">Searle, J.R. 1980. Minds Brains and Programs. The Behavioral and Brain Sciences, vol. 3. Copyright 1980 Cambridge University Press.</a></li> <li><a rel="nofollow" class="external text" href="http://www.simulation-argument.com/simulation.html">Bostrom, N. 2003. Are you living in a Computer Simulation? Philosophical Quarterly, 2003, Vol. 53, No. 
211, pp. 243-255.</a></li></ul></div>
</div> <div class="vector-settings" id="p-dock-bottom"> <ul></ul> </div><script>(RLQ=window.RLQ||[]).push(function(){mw.config.set({"wgHostname":"mw-web.codfw.main-f69cdc8f6-q4llr","wgBackendResponseTime":94,"wgPageParseReport":{"limitreport":{"cputime":"0.081","walltime":"0.121","ppvisitednodes":{"value":745,"limit":1000000},"postexpandincludesize":{"value":8156,"limit":2097152},"templateargumentsize":{"value":6844,"limit":2097152},"expansiondepth":{"value":11,"limit":100},"expensivefunctioncount":{"value":0,"limit":500},"unstrip-depth":{"value":0,"limit":20},"unstrip-size":{"value":0,"limit":5000000},"entityaccesscount":{"value":0,"limit":400},"timingprofile":["100.00% 69.253 1 -total"," 55.94% 38.740 8 Template:Font"," 38.77% 26.850 8 Template:Optional_style"," 34.73% 24.051 1 Template:BookCat"," 12.53% 8.679 1 Template:Evalx"," 8.15% 5.647 1 Template:ISBN"," 5.41% 3.747 1 Template:BOOKCATEGORY"," 2.85% 1.971 1 Template:NAIVEBOOKNAME"]},"scribunto":{"limitreport-timeusage":{"value":"0.018","limit":"10.000"},"limitreport-memusage":{"value":1025758,"limit":52428800}},"cachereport":{"origin":"mw-web.codfw.main-f69cdc8f6-jvw68","timestamp":"20241123164653","ttl":2592000,"transientcontent":false}}});});</script> </body> </html>