Sensory Systems/Visual System
<a href="/wiki/Sensory_Systems/Somatosensory_System" title="Sensory Systems/Somatosensory System">Feeling</a><br /> <a href="/wiki/Sensory_Systems/Olfactory_System" title="Sensory Systems/Olfactory System">Smell</a><br /> <a href="/wiki/Sensory_Systems/Gustatory_System" title="Sensory Systems/Gustatory System">Taste</a><br /> </p> </div> <div style="padding:0.3em; background-color:#ccccff; font-size:0.9em; font-family:sans-serif;">Technological Aspects</div> <div style="padding:0.3em; line-height:1.5em;"> <p><a href="/wiki/Sensory_Systems/Neurosensory_Implants" title="Sensory Systems/Neurosensory Implants">Implants</a><br /> <a href="/wiki/Sensory_Systems/Computer_Models" title="Sensory Systems/Computer Models">Models</a> </p> </div> <div style="padding:0.3em; background-color:#ccccff; font-size:0.9em; font-family:sans-serif;">In Animals</div> <div style="padding:0.3em; line-height:1.5em;"> <p><a href="/wiki/Sensory_Systems/Birds" title="Sensory Systems/Birds">Birds</a><br /> <a href="/wiki/Sensory_Systems/Fish" title="Sensory Systems/Fish">Fish</a><br /> <a href="/wiki/Sensory_Systems/Marine_Animals" title="Sensory Systems/Marine Animals">Marine Animals</a><br /> <a href="/wiki/Sensory_Systems/Arthropods" title="Sensory Systems/Arthropods">Arthropods</a><br /> <a href="/wiki/Sensory_Systems/Other_Animals" title="Sensory Systems/Other Animals">Other Animals</a> </p> </div> </div> <div class="noprint toclimit-3" style="float:left; margin:0.25em 0.5em 0.5em 0.25em; padding:0.5em 1.4em 0.8em 0em; background:transparent;"><meta property="mw:PageProp/toc" /></div> <div class="mw-heading mw-heading2"><h2 id="Introduction">Introduction</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Anatomy&amp;veaction=edit&amp;section=T-1" title="Edit section: Introduction" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Anatomy&amp;action=edit&amp;section=T-1" title="Edit section&#039;s source code: Introduction"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Generally speaking, visual systems rely on electromagnetic (EM) waves to give an organism more information about its surroundings. This information could be regarding potential mates, dangers and sources of sustenance. Different organisms have different constituents that make up what is referred to as a visual system. </p><p>The complexity of eyes range from something as simple as an eye spot, which is nothing more than a collection of photosensitive cells, to a fully fledged camera eye. If an organism has different types of photosensitive cells, or cells sensitive to different wavelength ranges, the organism would theoretically be able to perceive colour or at the very least colour differences. Polarisation, another property of EM radiation, can be detected by some organisms, with insects and cephalopods having the highest accuracy. </p><p>Please note, in this text, the focus has been on using EM waves to see. Granted, some organisms have evolved alternative ways of obtaining sight or at the very least supplementing what they see with extra-sensory information. For example, whales or bats, which use echo-location. This may be seeing in some sense of the definition of the word, but it is not entirely correct. 
Additionally, vision and visual are words most often associated with EM waves in the visual wavelength range, which is normally defined as the same wavelength limits of human vision. </p> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Electromagnetic_spectrum_-eng.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Electromagnetic_spectrum_-eng.svg/689px-Electromagnetic_spectrum_-eng.svg.png" decoding="async" width="689" height="211" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Electromagnetic_spectrum_-eng.svg/1034px-Electromagnetic_spectrum_-eng.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Electromagnetic_spectrum_-eng.svg/1378px-Electromagnetic_spectrum_-eng.svg.png 2x" data-file-width="1176" data-file-height="360" /></a><figcaption>Electromagnetic spectrum</figcaption></figure> <p><br /> Since some organisms detect EM waves with frequencies below and above that of humans a better definition must be made. We therefore define the visual wavelength range as wavelengths of EM between 300nm and 800nm. This may seem arbitrary to some, but selecting the wrong limits would render parts of some bird's vision as non-vision. Also, with this range of wavelengths, we have defined for example the thermal-vision of certain organisms, like for example snakes as non-vision. Therefore snakes using their pit organs, which is sensitive to EM between 5000nm and 30,000nm (IR), do not "see", but somehow "feel" from afar. Even if blind specimens have been documented targeting and attacking particular body parts. </p><p>Firstly a brief description of different types of visual system sensory organs will be elaborated on, followed by a thorough explanation of the components in human vision, the signal processing of the visual pathway in humans and finished off with an example of the perceptional outcome due to these stages. </p> <div class="mw-heading mw-heading3"><h3 id="Sensory_Organs">Sensory Organs</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Anatomy&amp;veaction=edit&amp;section=T-2" title="Edit section: Sensory Organs" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Anatomy&amp;action=edit&amp;section=T-2" title="Edit section&#039;s source code: Sensory Organs"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Vision, or the ability to see depends on visual system sensory organs or eyes. There are many different constructions of eyes, ranging in complexity depending on the requirements of the organism. The different constructions have different capabilities, are sensitive to different wave-lengths and have differing degrees of acuity, also they require different processing to make sense of the input and different numbers to work optimally. The ability to detect and decipher EM has proved to be a valuable asset to most forms of life, leading to an increased chance of survival for organisms that utilise it. In environments without sufficient light, or complete lack of it, lifeforms have no added advantage of vision, which ultimately has resulted in atrophy of visual sensory organs with subsequent increased reliance on other senses (e.g. some cave dwelling animals, bats etc.). 
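These band limits are easy to encode directly. The following minimal sketch uses only the ranges quoted above; the band names and the helper function are illustrative, not a standard API.

<syntaxhighlight lang="python">
# Wavelength bands as defined in this text (in metres).
BANDS = {
    "visual (this text's definition)": (300e-9, 800e-9),    # 300-800 nm
    "human-visible (approx.)":         (400e-9, 700e-9),    # rods and cones
    "snake pit organ (IR)":            (5000e-9, 30000e-9), # 5,000-30,000 nm
}

def bands_containing(wavelength_m):
    """Return the names of all defined bands containing this wavelength."""
    return [name for name, (lo, hi) in BANDS.items()
            if lo <= wavelength_m <= hi]

for wl in (350e-9, 550e-9, 10_000e-9):
    print(f"{wl * 1e9:8.0f} nm -> {bands_containing(wl) or 'outside all bands'}")
</syntaxhighlight>

By this definition, the 350 nm light some birds can see counts as "visual", while the 10,000 nm radiation sensed by a pit organ does not.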
First, the different types of visual sensory organs will be described briefly, followed by a thorough explanation of the components of human vision and the signal processing of the human visual pathway, finishing with an example of the perceptual outcome of these stages.

=== Sensory Organs ===

Vision, or the ability to see, depends on visual sensory organs, or eyes. Many different eye constructions exist, ranging in complexity depending on the requirements of the organism. The different constructions have different capabilities, are sensitive to different wavelengths and have differing degrees of acuity; they also require different processing to make sense of the input, and different numbers of them are needed to work optimally. The ability to detect and decipher EM has proved to be a valuable asset to most forms of life, leading to an increased chance of survival for organisms that utilise it. In environments without sufficient light, or lacking it completely, lifeforms gain no advantage from vision, which has ultimately resulted in atrophy of visual sensory organs and an increased reliance on other senses (e.g. in some cave-dwelling animals and bats). Interestingly enough, visual sensory organs appear to be tuned to the optical window, defined as the EM wavelengths (between 300 nm and 1100 nm) that pass through the atmosphere and reach the ground. This is shown in the figure below. You may notice that other "windows" exist: an IR window, which explains to some extent the thermal "vision" of snakes, and a radiofrequency (RF) window, which no known lifeform is able to detect.

[[File:Atmospheric_electromagnetic_opacity.svg|thumb|center|Atmospheric electromagnetic opacity]]

Through time, evolution has yielded many eye constructions, some of which have evolved multiple times, yielding similarities between organisms occupying similar niches. One underlying aspect is essentially identical regardless of species or complexity of the sensory organ: the universal usage of light-sensitive proteins called opsins. Without focusing too much on the molecular basis, the various constructions can be categorised into distinct groups:

* Spot eyes
* Pit eyes
* Pinhole eyes
* Lens eyes
* Refractive cornea eyes
* Reflector eyes
* Compound eyes

The least complicated configuration of eyes enables organisms to simply sense the ambient light, letting the organism know whether there is light or not. It is normally just a collection of photosensitive cells clustered in one spot, and is thus sometimes referred to as a spot eye, eye spot or stemma. By either adding more angular structures or recessing the spot eyes, an organism also gains access to directional information, which is a vital requirement for image formation. These so-called pit eyes are by far the most common type of visual sensory organ, and can be found in over 95% of all known species.

[[File:Nautilus_pompilius_(head).jpg|thumb|left|Pinhole eye]]

Taking this approach to its extreme leads to the pit becoming a cavernous structure, which increases the sharpness of the image, alas at a loss in intensity. In other words, there is a trade-off between intensity, or brightness, and sharpness.
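This trade-off can be made concrete with a toy pinhole model. A minimal sketch, assuming a 10 mm aperture-to-retina depth and 500 nm light (illustrative values, not Nautilus measurements): geometric blur grows with the aperture diameter, diffraction blur shrinks with it, and brightness grows with the aperture area.

<syntaxhighlight lang="python">
import math

WAVELENGTH = 500e-9  # m, mid-range visible light (assumed)
DEPTH = 0.01         # m, aperture-to-retina distance (assumed)

def blur_spot(d):
    """Combined blur (m): geometric blur ~ d plus diffraction ~ 2.44*lambda*L/d."""
    return d + 2.44 * WAVELENGTH * DEPTH / d

for d in (0.1e-3, 0.5e-3, 1.0e-3, 2.0e-3):
    brightness = (d / 1e-3) ** 2  # light gathered, relative to a 1 mm aperture
    print(f"d = {d*1e3:3.1f} mm: blur ~ {blur_spot(d)*1e6:7.1f} um, "
          f"relative brightness ~ {brightness:5.2f}")

# The blur is smallest near d = sqrt(2.44 * lambda * L):
print(f"sharpest image near d ~ {math.sqrt(2.44*WAVELENGTH*DEPTH)*1e3:.2f} mm")
</syntaxhighlight>

The sharpest aperture in this model is a fraction of a millimetre wide and admits very little light: sharpening the image always costs brightness.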
An example of this trade-off can be found in the Nautilus, species belonging to the family Nautilidae, organisms considered to be living fossils. They are the only known species with this type of eye, referred to as the pinhole eye, which is completely analogous to the pinhole camera or camera obscura. Moreover, like more advanced cameras, Nautili are able to adjust the size of the aperture, increasing or decreasing the resolution of the eye at a respective decrease or increase in image brightness. As with the camera, the way to alleviate the intensity/resolution trade-off is to include a lens, a structure that focuses the light onto a central area, which most often has a higher density of photosensors. By adjusting the shape of the lens and moving it around, and by controlling the size of the aperture, or pupil, organisms can adapt to different conditions and focus on particular regions of interest in any visual scene. The last upgrade to the eye constructions mentioned so far is the inclusion of a refractive cornea. In eyes with this structure, two thirds of the total optical power of the eye comes from refraction at the cornea, with its high refractive index relative to air, enabling very high resolution vision. Most land animals, including humans, have eyes of this particular construction. Additionally, many variations of lens structure, lens number, photosensor density, fovea shape, fovea number, pupil shape, etc. exist, always to increase the chances of survival for the organism in question. These variations lead to a varied outward appearance of eyes, even within a single eye-construction category. To demonstrate this point, a collection of photographs of animals with the same eye category (refractive cornea eyes) is shown below.

<gallery caption="Refractive Cornea Eyes">
File:Hawk_eye.jpg|Hawk Eye
File:Eye_Coburger_Fuchsschaf.jpg|Sheep Eye
File:Изображение_307.jpg|Cat Eye
File:Eye_iris.jpg|Human Eye
File:A_crocodiles_eye_(7825799462).jpg|Crocodile Eye
</gallery>
srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/1b/%D0%98%D0%B7%D0%BE%D0%B1%D1%80%D0%B0%D0%B6%D0%B5%D0%BD%D0%B8%D0%B5_307.jpg/225px-%D0%98%D0%B7%D0%BE%D0%B1%D1%80%D0%B0%D0%B6%D0%B5%D0%BD%D0%B8%D0%B5_307.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/1/1b/%D0%98%D0%B7%D0%BE%D0%B1%D1%80%D0%B0%D0%B6%D0%B5%D0%BD%D0%B8%D0%B5_307.jpg/300px-%D0%98%D0%B7%D0%BE%D0%B1%D1%80%D0%B0%D0%B6%D0%B5%D0%BD%D0%B8%D0%B5_307.jpg 2x" data-file-width="2089" data-file-height="1680" /></a><figcaption>Cat Eye</figcaption></figure> </td> <td> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Eye_iris.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/6/65/Eye_iris.jpg/150px-Eye_iris.jpg" decoding="async" width="150" height="100" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/65/Eye_iris.jpg/225px-Eye_iris.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/65/Eye_iris.jpg/300px-Eye_iris.jpg 2x" data-file-width="2788" data-file-height="1864" /></a><figcaption>Human Eye</figcaption></figure> </td> <td> <figure typeof="mw:File/Thumb"><a href="/wiki/File:A_crocodiles_eye_(7825799462).jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/2/22/A_crocodiles_eye_%287825799462%29.jpg/150px-A_crocodiles_eye_%287825799462%29.jpg" decoding="async" width="150" height="100" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/2/22/A_crocodiles_eye_%287825799462%29.jpg/225px-A_crocodiles_eye_%287825799462%29.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/2/22/A_crocodiles_eye_%287825799462%29.jpg/300px-A_crocodiles_eye_%287825799462%29.jpg 2x" data-file-width="2555" data-file-height="1703" /></a><figcaption>Crocodile Eye</figcaption></figure> </td> </tr> </tbody></table> <p>An alternative to the lens approach called reflector eyes can be found in for example mollusks. Instead of the conventional way of focusing light to a single point in the back of the eye using a lens or a system of lenses, these organisms have mirror like structures inside the chamber of the eye that reflects the light into a central portion, much like a parabola dish. Although there are no known examples of organisms with reflector eyes capable of image formation, at least one species of fish, the spookfish (Dolichopteryx longipes) uses them in combination with "normal" lensed eyes. </p> <figure class="mw-halign-left" typeof="mw:File/Thumb"><a href="/wiki/File:Volucella_pellucens_head_complete_Richard_Bartz.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/d/d4/Volucella_pellucens_head_complete_Richard_Bartz.jpg/200px-Volucella_pellucens_head_complete_Richard_Bartz.jpg" decoding="async" width="200" height="140" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/d/d4/Volucella_pellucens_head_complete_Richard_Bartz.jpg/300px-Volucella_pellucens_head_complete_Richard_Bartz.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/d/d4/Volucella_pellucens_head_complete_Richard_Bartz.jpg/400px-Volucella_pellucens_head_complete_Richard_Bartz.jpg 2x" data-file-width="2500" data-file-height="1744" /></a><figcaption>Compound eye</figcaption></figure> <p>The last group of eyes, found in insects and crustaceans, is called compound eyes. These eyes consist of a number of functional sub-units called ommatidia, each consisting of a facet, or front surface, a transparent crystalline cone and photo-sensitive cells for detection. 
Not only the type of eye varies, but also the number of eyes. As you are well aware, humans usually have two eyes; spiders, on the other hand, have a varying number of eyes, with most species having eight. The different pairs of eyes usually also vary in size, and the differing sizes serve different functions. In jumping spiders, for example, two large front-facing eyes give the spider excellent visual acuity, used mainly to target prey, while six smaller eyes have much poorer resolution but help the spider avoid potential dangers. Two photographs, of the eyes of a jumping spider and of a wolf spider, demonstrate the variability in the eye topologies of arachnids.

<gallery caption="Eye Topologies of Spiders">
File:Wolf_eyes_for_guide.jpg|Wolf Spider
File:Phidippus_pius_eyes.jpg|Jumping Spider
</gallery>

== Anatomy of the Visual System ==

We humans are visual creatures, and our eyes are correspondingly complicated, with many components. This chapter attempts to describe these components, giving some insight into the properties and functionality of human vision.

==== Getting inside of the eyeball - Pupil, iris and the lens ====

Light rays enter the eye through the black aperture, or pupil, at the front of the eye. It appears black because the light is fully absorbed by the tissue inside the eye. Only through the pupil can light enter the eye, which means the amount of incoming light is effectively determined by the size of the pupil. The pigmented iris surrounding the pupil functions as the eye's aperture stop, and it is the amount of pigment in the iris that gives rise to the various eye colours found in humans.

In addition to this layer of pigment, the iris has two layers of smooth muscle: a circular muscle called the pupillary sphincter, which contracts to make the pupil smaller, and the pupillary dilator, which contracts to dilate the pupil. Together these muscles dilate or constrict the pupil depending on the requirements or conditions of the person. The shape of the lens, in turn, is controlled by the ciliary muscle, acting through the ciliary zonules, fibres that also hold the lens in place.

The lens is situated immediately behind the pupil. Its shape and characteristics reveal a purpose similar to that of a camera lens, though it functions in a slightly different way. The shape of the lens is adjusted by the pull of the ciliary zonules, which consequently changes its focal length. Together with the cornea, the lens changes the focus of the eye, which makes it a very important structure; however, only one third of the total optical power of the eye is due to the lens itself. It is also the eye's main filter. Most of the lens is made up of lens fibres, long and thin cells devoid of most cell machinery, which promotes transparency. Together with water-soluble proteins called crystallins, they increase the refractive index of the lens. The fibres also play a part in the structure and shape of the lens itself.
</p> <figure class="mw-halign-center" typeof="mw:File/Thumb"><a href="/wiki/File:Schematic_diagram_of_the_human_eye_en.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/1/1e/Schematic_diagram_of_the_human_eye_en.svg/500px-Schematic_diagram_of_the_human_eye_en.svg.png" decoding="async" width="500" height="508" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/1e/Schematic_diagram_of_the_human_eye_en.svg/750px-Schematic_diagram_of_the_human_eye_en.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/1/1e/Schematic_diagram_of_the_human_eye_en.svg/1000px-Schematic_diagram_of_the_human_eye_en.svg.png 2x" data-file-width="416" data-file-height="423" /></a><figcaption>Schematic diagram of the human eye</figcaption></figure> <div class="mw-heading mw-heading4"><h4 id="Beamforming_in_the_eye_–_Cornea_and_its_protecting_agent_-_Sclera"><span id="Beamforming_in_the_eye_.E2.80.93_Cornea_and_its_protecting_agent_-_Sclera"></span>Beamforming in the eye – Cornea and its protecting agent - Sclera</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Anatomy&amp;veaction=edit&amp;section=T-5" title="Edit section: Beamforming in the eye – Cornea and its protecting agent - Sclera" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Anatomy&amp;action=edit&amp;section=T-5" title="Edit section&#039;s source code: Beamforming in the eye – Cornea and its protecting agent - Sclera"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Structure_cornea1.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/1/13/Structure_cornea1.png/200px-Structure_cornea1.png" decoding="async" width="200" height="159" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/13/Structure_cornea1.png/300px-Structure_cornea1.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/1/13/Structure_cornea1.png/400px-Structure_cornea1.png 2x" data-file-width="821" data-file-height="654" /></a><figcaption>Structure of the Cornea</figcaption></figure> <p>The cornea, responsible for the remaining 2/3 of the total optical power of the eye, covers the iris, pupil and lens. It focuses the rays that pass through the iris before they pass through the lens. The cornea is only 0.5mm thick and consists of 5 layers: </p> <ul><li>Epithelium: A layer of epithelial tissue covering the surface of the cornea.</li> <li>Bowman's membrane: A thick protective layer composed of strong collagen fibres, that maintain the overall shape of the cornea.</li> <li>Stroma: A layer composed of parallel collagen fibrils. This layer makes up 90% of the cornea's thickness.</li> <li>Descemet's membrane and Endothelium: Are two layers adjusted to the anterior chamber of the eye filled with aqueous humor fluid produced by the ciliary body. This fluid moisturises the lens, cleans it and maintains the pressure in the eye ball. The chamber, positioned between cornea and iris, contains a trabecular meshwork body through which the fluid is drained out by Schlemm canal, through posterior chamber.</li></ul> <p>The surface of the cornea lies under two protective membranes, called the sclera and Tenon’s capsule. Both of these protective layers completely envelop the eyeball. 
The sclera is built from collagen and elastic fibres that protect the eye from external damage; this layer also gives rise to the white of the eye. It is pierced by nerves and vessels, with the largest hole reserved for the optic nerve. Moreover, it is covered by the conjunctiva, a clear mucous membrane on the surface of the eyeball that also lines the inside of the eyelid. The conjunctiva works as a lubricant and, together with the lacrimal gland, produces tears that lubricate and protect the eye. The remaining protective layer, the eyelid, also functions to spread this lubricant around.

==== Moving the eyes – extra-ocular muscles ====

The eyeball is moved by a complicated structure of extra-ocular muscles, consisting of four rectus muscles – inferior, medial, lateral and superior – and two oblique muscles – inferior and superior. The positioning of these muscles is presented below, along with their functions:

[[File:ExtraOcular_Muscles.png|thumb|Extra-ocular muscles: Green - Lateral Rectus; Red - Medial Rectus; Cyan - Superior Rectus; Pink - Inferior Rectus; Dark Blue - Superior Oblique; Yellow - Inferior Oblique.]]

The extra-ocular muscles are attached to the sclera of the eyeball and originate in the annulus of Zinn, a fibrous tendon surrounding the optic nerve. For the superior oblique muscle, a pulley system is formed, with the trochlea acting as the pulley and the muscle as the rope; this is required to redirect the muscle force in the correct direction. The remaining extra-ocular muscles have a direct path to the eye and therefore do not form such pulley systems. Using these extra-ocular muscles, the eye can rotate up, down, left and right, and further movements are possible as combinations of these.

Other types of movement are also very important for vision. Vergence movements enable the proper function of binocular vision. Fast, unconscious movements called saccades are essential for people to keep an object in focus; a saccade is a jittery movement performed as the eyes scan the visual field, in order to displace the point of fixation slightly. When you follow a moving object with your gaze, your eyes perform what is referred to as smooth pursuit.
Additional involuntary movements called nystagmus are caused by signals from the vestibular system; together they make up the vestibulo-ocular reflexes.

The brain stem controls all of the movements of the eyes, with different areas responsible for different movements:

* Pons: rapid horizontal movements, such as saccades or nystagmus
* Mesencephalon: vertical and torsional movements
* Cerebellum: fine tuning
* Edinger-Westphal nucleus: vergence movements

==== Where the vision reception occurs – The retina ====

[[File:Filtering_em_eye.png|thumb|center|Filtering of the light performed by the cornea, lens and pigment epithelium]]

Before being transduced, incoming EM passes through the cornea, lens and macula. These structures also act as filters to reduce unwanted EM, thereby protecting the eye from harmful radiation. The filtering response of each of these elements can be seen in the figure "Filtering of the light performed by the cornea, lens and pigment epithelium". The cornea attenuates the lower wavelengths, leaving the higher wavelengths nearly untouched. The lens blocks around 25% of the EM below 400 nm and more than 50% below 430 nm. Finally, the pigment epithelium, the last filtering stage before photoreception, affects around 30% of the EM between 430 nm and 500 nm.

The part of the eye that marks the transition from the non-photosensitive region to the photosensitive region is called the ora serrata. The photosensitive region is referred to as the retina, the sensory structure at the back of the eye. The retina consists of multiple layers, presented below, with millions of photoreceptors called rods and cones, which capture the light rays and convert them into electrical impulses. Transmission of these impulses is initiated by the ganglion cells and conducted through the optic nerve, the single route by which information leaves the eye.
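A rough sketch of how the attenuations quoted above combine: each element transmits a fraction of the incident light, and the fractions multiply. The step functions below are a crude reading of the quoted percentages (taking attenuation to grow towards shorter wavelengths), not measured transmission curves, and the cornea's gradual short-wavelength roll-off is omitted for brevity.

<syntaxhighlight lang="python">
def lens_transmission(wl_nm):
    if wl_nm < 400:
        return 0.50   # more than half blocked in the near-UV
    if wl_nm < 430:
        return 0.75   # around a quarter blocked just below 430 nm
    return 1.0

def epithelium_transmission(wl_nm):
    return 0.70 if 430 <= wl_nm <= 500 else 1.0  # ~30% affected, 430-500 nm

def fraction_reaching_photoreceptors(wl_nm):
    return lens_transmission(wl_nm) * epithelium_transmission(wl_nm)

for wl in (380, 420, 460, 550):
    print(f"{wl} nm: ~{fraction_reaching_photoreceptors(wl):.2f} "
          "of the incident light reaches the photoreceptors")
</syntaxhighlight>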
</p> <figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Retina_layers.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/6/67/Retina_layers.svg/300px-Retina_layers.svg.png" decoding="async" width="300" height="438" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/67/Retina_layers.svg/450px-Retina_layers.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/67/Retina_layers.svg/600px-Retina_layers.svg.png 2x" data-file-width="649" data-file-height="947" /></a><figcaption>Structure of retina including the main cell components: RPE: retinal pigment epithelium; OS: outer segment of the photoreceptor cells; IS: inner segment of the photoreceptor cells; ONL: outer nuclear layer; OPL: outer plexiform layer; INL: inner nuclear layer IPL: inner plexiform layer; GC: ganglion cell layer; P: pigment epithelium cell; BM: Bruch-Membran; R: rods; C: cones; H: horizontal cell; B: bipolar cell; M: Müller cell; A:amacrine cell; G: ganglion cell; AX: Axon; arrow: Membrane limitans externa.</figcaption></figure> <p>A conceptual illustration of the structure of the retina is shown on the right. As we can see, there are five main cell types: </p> <ul><li>photoreceptor cells</li> <li>horizontal cells</li> <li>bipolar cells</li> <li>amacrine cells</li> <li>ganglion cells</li></ul> <p>Photoreceptor cells can be further subdivided into two main types called rods and cones. Cones are much less numerous than rods in most parts of the retina, but there is an enormous aggregation of them in the macula, especially in its central part called the fovea. In this central region, each photo-sensitive cone is connected to one ganglion-cell. In addition, the cones in this region are slightly smaller than the average cone size, meaning you get more cones per area. Because of this ratio, and the high density of cones, this is where we have the highest visual acuity. </p> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Distribution_of_Cones_and_Rods_on_Human_Retina.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/1/1f/Distribution_of_Cones_and_Rods_on_Human_Retina.png/545px-Distribution_of_Cones_and_Rods_on_Human_Retina.png" decoding="async" width="545" height="341" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/1f/Distribution_of_Cones_and_Rods_on_Human_Retina.png/818px-Distribution_of_Cones_and_Rods_on_Human_Retina.png 1.5x, //upload.wikimedia.org/wikipedia/commons/1/1f/Distribution_of_Cones_and_Rods_on_Human_Retina.png 2x" data-file-width="891" data-file-height="557" /></a><figcaption>Distribution of Cones and Rods on Human Retina</figcaption></figure> <p>There are 3 types of human cones, each of the cones responding to a specific range of wavelengths, because of three types of a pigment called photopsin. Each pigment is sensitive to red, blue or green wavelength of light, so we have blue, green and red cones, also called S-, M- and L-cones for their sensitivity to short-, medium- and long-wavelength respectively. It consists of protein called opsin and a bound chromphore called the retinal. The main building blocks of the cone cell are the synaptic terminal, the inner and outer segments, the interior nucleus and the mitochondria. </p><p>The spectral sensitivities of the 3 types of cones: </p> <ul><li>1. S-cones absorb short-wave light, i.e. blue-violet light. The maximum absorption wavelength for the S-cones is 420nm</li> <li>2. 
M-cones absorb blue-green to yellow light. In this case The maximum absorption wavelength is 535nm</li> <li>3. L-cones absorb yellow to red light. The maximum absorption wavelength is 565nm</li></ul> <figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Cone_cell_en.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/a/a0/Cone_cell_en.png/250px-Cone_cell_en.png" decoding="async" width="250" height="343" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/a/a0/Cone_cell_en.png/375px-Cone_cell_en.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/a/a0/Cone_cell_en.png/500px-Cone_cell_en.png 2x" data-file-width="875" data-file-height="1200" /></a><figcaption>Cone cell structure</figcaption></figure> <p>The inner segment contains organelles and the cell's nucleus and organelles. The pigment is located in the outer segment, attached to the membrane as trans-membrane proteins within the invaginations of the cell-membrane that form the membranous disks, which are clearly visible in the figure displaying the basic structure of rod and cone cells. The disks maximize the reception area of the cells. The cone photoreceptors of many vertebrates contain spherical organelles called oil droplets, which are thought to constitute intra-ocular filters which may serve to increase contrast, reduce glare and lessen chromatic aberrations caused by the mitochondrial size gradient from the periphery to the centres. </p><p>Rods have a structure similar to cones, however they contain the pigment rhodopsin instead, which allows them to detect low-intensity light and makes them 100 times more sensitive than cones. Rhodopsin is the only pigment found in human rods, and it is found on the outer side of the pigment epithelium, which similarly to cones maximizes absorption area by employing a disk structure. Similarly to cones, the synaptic terminal of the cell joins it with a bipolar cell and the inner and outer segments are connected by cilium. </p><p>The pigment rhodopsin absorbs the light between 400-600nm, with a maximum absorption at around 500nm. This wavelength corresponds to greenish-blue light which means blue colours appear more intense in relation to red colours at night. 
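The absorption maxima quoted above lend themselves to a simple toy model. The sketch below approximates each pigment's sensitivity as a Gaussian peaked at 420, 535 and 565 nm for the S-, M- and L-cones and at ~500 nm for rod rhodopsin; real pigment spectra are asymmetric, and the ~60 nm width is only a convenient assumption for illustration.

<syntaxhighlight lang="python">
import math

PEAKS = {"S": 420.0, "M": 535.0, "L": 565.0, "rod": 500.0}  # nm (from the text)
WIDTH = 60.0                                                # nm, assumed

def receptor_response(wl_nm):
    return {name: math.exp(-((wl_nm - peak) / WIDTH) ** 2)
            for name, peak in PEAKS.items()}

# A 550 nm (green) light excites M- and L-cones strongly and S-cones hardly
# at all; colour vision rests on comparing such response triplets.
print({k: round(v, 3) for k, v in receptor_response(550.0).items()})
</syntaxhighlight>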
</p> <figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Rod_Cell.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Rod_Cell.svg/250px-Rod_Cell.svg.png" decoding="async" width="250" height="305" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Rod_Cell.svg/375px-Rod_Cell.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Rod_Cell.svg/500px-Rod_Cell.svg.png 2x" data-file-width="861" data-file-height="1051" /></a><figcaption>Rod cell structure</figcaption></figure> <figure class="mw-halign-center" typeof="mw:File/Thumb"><a href="/wiki/File:Cone-absorbance-en.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Cone-absorbance-en.svg/500px-Cone-absorbance-en.svg.png" decoding="async" width="500" height="315" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Cone-absorbance-en.svg/750px-Cone-absorbance-en.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Cone-absorbance-en.svg/1000px-Cone-absorbance-en.svg.png 2x" data-file-width="550" data-file-height="346" /></a><figcaption>The sensitivity of cones and rods across visible EM</figcaption></figure> <p>EM waves with wavelengths outside the range of 400 – 700 nm are not detected by either rods nor cones, which ultimately means they are not visible to human beings. </p><p>Horizontal cells occupy the inner nuclear layer of the retina. There are two types of horizontal cells and both types hyper-polarise in response to light i.e. they become more negative. Type A consists of a subtype called HII-H2 which interacts with predominantly S-cones. Type B cells have a subtype called HI-H1, which features a dendrite tree and an axon. The former contacts mostly M- and L-cone cells and the latter rod cells. Contacts with cones are made mainly by prohibitory synapses, while the cells themselves are joined into a network with gap junctions. </p> <figure class="mw-halign-center" typeof="mw:File/Thumb"><a href="/wiki/File:Fig_retine.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/2/21/Fig_retine.png/500px-Fig_retine.png" decoding="async" width="500" height="216" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/2/21/Fig_retine.png 1.5x" data-file-width="624" data-file-height="269" /></a><figcaption>Cross-section of the human retina, with bipolar cells indicated in red.</figcaption></figure> <p>Bipolar cells spread single dendrites in the outer plexiform layer and the perikaryon, their cell bodies, are found in the inner nuclear layer. Dendrites interconnect exclusively with cones and rods and we differentiate between one rod bipolar cell and nine or ten cone bipolar cells. These cells branch with amacrine or ganglion cells in the inner plexiform layer using an axon. Rod bipolar cells connect to triad synapses or 18-70 rod cells. Their axons spread around the inner plexiform layer synaptic terminals, which contain ribbon synapses and contact a pair of cell processes in dyad synapses. They are connected to ganglion cells with AII amacrine cell links. </p><p>Amecrine cells can be found in the inner nuclear layer and in the ganglion cell layer of the retina. Occasionally they are found in the inner plexiform layer, where they work as signal modulators. They have been classified as narrow-field, small-field, medium-field or wide-field depending on their size. 
However, many classifications exist, leading to over 40 different types of amacrine cells.

Ganglion cells are the final transmitters of the visual signal from the retina to the brain. The most common ganglion cells in the retina are the midget ganglion cell and the parasol ganglion cell. The signal, after having passed through all the retinal layers, is passed on to these cells, the final stage of the retinal processing chain. All the information is collected here and forwarded to the retinal nerve fibres and the optic nerve. The spot where the ganglion axons fuse to form the optic nerve is called the optic disc. The nerve is built mainly from the retinal ganglion axons and glial cells. The majority of the axons transmit data to the lateral geniculate nucleus, the termination point for most parts of the nerve, which forwards the information to the visual cortex. Some ganglion cells also react to light directly, but because this response is slower than that of rods and cones, it is believed to be related to sensing ambient light levels and adjusting the biological clock.

== Signal Processing ==

As mentioned before, the retina is the main component of the eye, because it contains all the light-sensitive cells; without it, the eye would be comparable to a digital camera without its CCD (charge-coupled device) sensor. This part elaborates on how the retina perceives light, how the optical signal is transmitted to the brain, and how the brain processes the signal to form enough information for decision making.

==== Creation of the initial signals - Photosensor Function ====

Vision invariably starts with light hitting the photosensitive cells of the retina. Light-absorbing visual pigments, together with a variety of enzymes and transmitters in the retinal rods and cones, initiate the conversion of visible EM stimuli into electrical impulses, in a process known as photoelectric transduction. Using rods as an example, the incoming visible EM hits rhodopsin molecules, transmembrane molecules found in the rods' outer disk structure.
Each rhodopsin molecule consists of a cluster of helices called opsin that envelop and surround 11-cis retinal, the part of the molecule that changes in response to the energy of incoming photons. In biological molecules, moieties, or parts of molecules, that undergo conformational changes in response to this energy are sometimes referred to as chromophores. 11-cis retinal straightens in response to the incoming energy, turning into all-trans retinal, which forces the opsin helices further apart and uncovers particular reactive sites. This "activated" rhodopsin molecule is sometimes referred to as Metarhodopsin II. From this point on, the reaction continues even if the visible light stimulation stops. Metarhodopsin II can then react with roughly 100 molecules of a G protein called transducin, each of which dissociates into its α- and βγ-subunits once its bound GDP has been exchanged for GTP. The activated α-GTP subunits then bind to cGMP-phosphodiesterase (PDE), activating it; the resulting drop in cytosolic cGMP closes cation channels, lowering the cytosolic cation concentration and therefore changing the polarisation of the cell.

The natural photoelectric transduction reaction has an amazing power of amplification: one single rhodopsin molecule activated by a single quantum of light causes the hydrolysis of up to 10<sup>6</sup> cGMP molecules per second.

===== Photo Transduction =====

[[File:Phototransduction.png|thumb|Representation of molecular steps in photoactivation (modified from Leskov et al., 2000). Depicted is an outer membrane disk in a rod. Step 1: An incident photon (hν) is absorbed and activates rhodopsin, by conformational change in the disk membrane, to R*. Step 2: R* then makes repeated contacts with transducin molecules, catalysing their activation to G* through the release of bound GDP in exchange for cytoplasmic GTP (Step 3). The α subunit of G* binds the inhibitory γ subunits of the phosphodiesterase (PDE), activating its α and β subunits. Step 4: Activated PDE hydrolyses cGMP. Step 5: Guanylyl cyclase (GC) synthesises cGMP, the second messenger in the phototransduction cascade. Reduced levels of cytosolic cGMP cause the cyclic-nucleotide-gated channels to close, preventing further influx of Na+ and Ca2+.]]
The steps of the cascade in order:

# A light photon interacts with the retinal in a photoreceptor. The retinal undergoes isomerisation, changing from the 11-cis to the all-trans configuration.
# Retinal no longer fits into the opsin binding site.
# Opsin therefore undergoes a conformational change to metarhodopsin II.
# Metarhodopsin II is unstable and splits, yielding opsin and all-trans retinal.
# The opsin activates the regulatory protein transducin. This causes transducin to dissociate from its bound GDP and bind GTP; the alpha subunit of transducin then dissociates from the beta and gamma subunits, with the GTP still bound to the alpha subunit.
# The alpha subunit-GTP complex activates phosphodiesterase.
# Phosphodiesterase breaks down cGMP to 5'-GMP. This lowers the concentration of cGMP, and therefore the sodium channels close.
# Closure of the sodium channels causes hyperpolarisation of the cell, due to the ongoing potassium current.
# Hyperpolarisation of the cell causes voltage-gated calcium channels to close.
# As the calcium level in the photoreceptor cell drops, the amount of the neurotransmitter glutamate released by the cell also drops, because calcium is required for the glutamate-containing vesicles to fuse with the cell membrane and release their contents.
# A decrease in the amount of glutamate released by the photoreceptors causes depolarisation of ON-centre bipolar cells (rod and cone ON bipolar cells) and hyperpolarisation of cone OFF bipolar cells.

Without visible EM stimulation, rod cells, which contain a cocktail of ions, proteins and other molecules, have a membrane potential of around -40 mV. Compared with other nerve cells, this is quite high (-65 mV). In this state, the neurotransmitter glutamate is continuously released from the axon terminals and absorbed by the neighbouring bipolar cells. With incoming visible EM and the cascade reaction described above, the potential difference drops to -70 mV. This hyperpolarisation of the cell causes a reduction in the amount of released glutamate, thereby affecting the activity of the bipolar cells and, subsequently, the following steps in the visual pathway.

Similar processes exist in the cone cells and in the photosensitive ganglion cells, but they make use of different opsins: photopsin I through III (sensitive to yellowish-green, green and blue-violet light respectively) are found in the three different cone cells, and melanopsin (blue-sensitive) is found in the photosensitive ganglion cells.
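The amplification quoted above can be spelled out as simple bookkeeping: one activated rhodopsin contacts roughly 100 transducin molecules, and the cascade hydrolyses up to 10^6 cGMP molecules per second. The per-PDE rate computed below is a derived illustration, not an independently quoted figure.

<syntaxhighlight lang="python">
ACTIVATED_RHODOPSIN = 1  # one photoisomerised rhodopsin (R*)
TRANSDUCIN_PER_R = 100   # "roughly 100 molecules" (from the text)
CGMP_PER_SECOND = 1e6    # total hydrolysis rate (from the text)

# If each activated transducin ends up driving one PDE complex, each complex
# must hydrolyse on the order of:
per_pde = CGMP_PER_SECOND / (ACTIVATED_RHODOPSIN * TRANSDUCIN_PER_R)
print(f"~{per_pde:.0f} cGMP molecules per second per active PDE")
print(f"overall gain: ~{CGMP_PER_SECOND:.0e} cGMP per absorbed photon per second")
</syntaxhighlight>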
</p> <div class="mw-heading mw-heading4"><h4 id="Processing_Signals_in_the_Retina">Processing Signals in the Retina</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;veaction=edit&amp;section=T-4" title="Edit section: Processing Signals in the Retina" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;action=edit&amp;section=T-4" title="Edit section&#039;s source code: Processing Signals in the Retina"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Receptive_field.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/1/16/Receptive_field.png/300px-Receptive_field.png" decoding="async" width="300" height="549" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/1/16/Receptive_field.png 1.5x" data-file-width="403" data-file-height="737" /></a><figcaption></figcaption></figure> <p>Different bipolar cells react differently to the changes in the released glutamate. The so called ON and OFF bipolar cells are used to form the direct signal flow from cones to bipolar cells. The ON bipolar cells will depolarise by visible EM stimulation and the corresponding ON ganglion cells will be activated. On the other hand the OFF bipolar cells are hyper polarised by the visible EM stimulation, and the OFF ganglion cells are inhibited. This is the basic pathway of the Direct signal flow. The Lateral signal flow will start from the rods, then go to the bipolar cells, the amacrine cells, and the OFF bipolar cells inhibited by the Rod-amacrine cells and the ON bipolar cells will stimulated via an electrical synapse, after all of the previous steps, the signal will arrive at the ON or OFF ganglion cells and the whole pathway of the Lateral signal flow is established. </p><p>When the action potential (AP) in ON, ganglion cells will be triggered by the visible EM stimulus. The AP frequency will increase when the sensor potential increases. In other words, AP depends on the amplitude of the sensor's potential. The region of ganglion cells where the stimulatory and inhibitory effects influence the AP frequency is called receptive field (RF). Around the ganglion cells, the RF is usually composed of two regions: the central zone and the ring-like peripheral zone. They are distinguishable during visible EM adaptation. A visible EM stimulation on the centric zone could lead to AP frequency increase and the stimulation on the periphery zone will decrease the AP frequency. When the light source is turned off the excitation occurs. So the name of ON field (central field ON) refers to this kind of region. Of course the RF of the OFF ganglion cells act the opposite way and is therefore called "OFF field" (central field OFF). The RFs are organised by the horizontal cells. The impulse on the periphery region will be impulsed and transmitted to the central region, and there the so-called stimulus contrast is formed. This function will make the dark seem darker and the light brighter. If the whole RF is exposed to light. the impulse of the central region will predominate. 
</p> <div class="mw-heading mw-heading4"><h4 id="Signal_Transmission_to_the_Cortex">Signal Transmission to the Cortex</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;veaction=edit&amp;section=T-5" title="Edit section: Signal Transmission to the Cortex" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;action=edit&amp;section=T-5" title="Edit section&#039;s source code: Signal Transmission to the Cortex"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>As mentioned previously, axons of the ganglion cells converge at the optic disk of the retina, forming the optic nerve. These fibres are positioned inside the bundle in a specific order. Fibres from the macular zone of the retina are in the central portion, and those from the temporal half of the retina take up the periphery part. A partial decussation or crossing occurs when these fibres are outside the eye cavity. The fibres from the nasal halves of each retina cross to the opposite halves and extend to the brain. Those from the temporal halves remain uncrossed. This partial crossover is called the optic chiasma, and the optic nerves past this point are called optic tracts, mainly to distinguish them from single-retinal nerves. The function of the partial crossover is to transmit the right-hand visual field produced by both eyes to the left-hand half of the brain only and vice versa. Therefore the information from the right half of the body, and the right visual field, is all transmitted to the left-hand part of the brain when reaches the posterior part of the fore-brain (diencephalon). </p> <figure class="mw-halign-center" typeof="mw:File/Thumb"><a href="/wiki/File:Gray722.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/c/c0/Gray722.png/350px-Gray722.png" decoding="async" width="350" height="486" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/c/c0/Gray722.png 1.5x" data-file-width="360" data-file-height="500" /></a><figcaption>The pathway to the central cortex</figcaption></figure> <p>The information relay between the fibers of optic tracts and the nerve cells occurs in the lateral geniculate bodies, the central part of the visual signal processing, located in the thalamus of the brain. From here the information is passed to the nerve cells in the occipital cortex of the corresponding side of the brain. Connections from the retina to the brain can be separated into a 'parvocellular pathway' and a "magnocellular pathway". The parvocellular pathways signals color and fine detail, whereas the magnocellular pathways detect fast moving stimuli. 
</p> <figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Magno_Parvocellular_Pathways.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/d/d2/Magno_Parvocellular_Pathways.svg/350px-Magno_Parvocellular_Pathways.svg.png" decoding="async" width="350" height="204" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/d/d2/Magno_Parvocellular_Pathways.svg/525px-Magno_Parvocellular_Pathways.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/d/d2/Magno_Parvocellular_Pathways.svg/700px-Magno_Parvocellular_Pathways.svg.png 2x" data-file-width="553" data-file-height="322" /></a><figcaption>Connections from the retina to the brain can be separated into a "parvocellular pathway" and a "magnocellular pathway". The parvocellular pathway originates in midget cells in the retina, and signals color and fine detail; magnocellular pathway starts with parasol cells, and detects fast moving stimuli.</figcaption></figure> <p>Signals from standard digital cameras correspond approximately to those of the parvocellular pathway. To simulate the responses of parvocellular pathways, researchers have been developing neuromorphic sensory systems, which try to mimic spike-based computation in neural systems. Thereby they use a scheme called "address-event representation" for the signal transmission in the neuromorphic electronic systems (Liu and Delbruck 2010 <a rel="nofollow" class="external autonumber" href="http://www.ncbi.nlm.nih.gov/pubmed/20493680">[1]</a>). </p><p>Anatomically, the retinal Magno and Parvo ganglion cells respectively project to 2 ventral magnocellular layers and 4 dorsal parvocellular layers of the Lateral Geniculate Nucleus (LGN). Each of the six LGN layers receives inputs from either the ipsilateral or contralateral eye, i.e., the ganglion cells of the left eye cross over and project to layer 1, 4 and 6 of the right LGN, and the right eye ganglion cells project (uncrossed) to its layer 2, 3 and 5. From here the information from the right and left eye is separated. </p><p>Although human vision is combined by two halves of the retina and the signal is processed by the opposite cerebral hemispheres, the visual field is considered as a smooth and complete unit. Hence the two visual cortical areas are thought of as being intimately connected. This connection, called corpus callosum is made of neurons, axons and dendrites. Because the dendrites make synaptic connections to the related points of the hemispheres, electric simulation of every point on one hemisphere indicates simulation of the interconnected point on the other hemisphere. The only exception to this rule is the primary visual cortex. </p><p>The synapses are made by the optic tract in the respective layers of the lateral geniculate body. Then these axons of these third-order nerve cells are passed up to the calcarine fissure in each occipital lobe of the cerebral cortex. Because bands of the white fibres and axons pair from the nerve cells in the retina go through it, it is called the striate cortex, which incidentally is our primary visual cortex, sometimes known as V1. At this point, impulses from the separate eyes converge to common cortical neurons, which then enables complete input from both eyes in one region to be used for perception and comprehension. Pattern recognition is a very important function of this particular part of the brain, with lesions causing problems with visual recognition or blindsight. 
<p>Because the optic tract fibres pass their information to the lateral geniculate bodies, and from there to the striate area, in an ordered manner, stimulation of a single point on the retina produces an electrical response in a correspondingly small region of both the lateral geniculate body and the striate cortex. This is an obvious point-to-point way of signal processing; if the whole retina is stimulated, the responses occur across both lateral geniculate bodies and the grey matter of the striate cortex. It is thus possible to map this brain region to the retinal fields or, more usually, the visual fields. </p>
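<p>As a purely illustrative aside, this point-to-point (retinotopic) mapping is often approximated in modelling work by a complex-logarithm transform: a visual-field point at eccentricity r and polar angle θ maps to the cortical position w = k·log(z + a), with z = r·e<sup>iθ</sup>. The constants in the sketch below are invented for illustration, not measured values.</p> <pre>
import numpy as np

def retina_to_cortex(ecc_deg, angle_deg, k=15.0, a=0.7):
    """Complex-log model of the retinotopic map: neighbouring points in
    the visual field land on neighbouring points of the model cortex."""
    z = ecc_deg * np.exp(1j * np.deg2rad(angle_deg))
    w = k * np.log(z + a)
    return w.real, w.imag      # model cortical coordinates, in mm

# a fixed 1-degree step in the visual field covers much more cortex
# near the fovea than in the periphery (cortical magnification)
for ecc in (0.5, 2.0, 8.0, 32.0):
    x1, _ = retina_to_cortex(ecc, 0.0)
    x2, _ = retina_to_cortex(ecc + 1.0, 0.0)
    print(f"1 deg at eccentricity {ecc:4.1f} deg covers {x2 - x1:5.2f} mm of cortex")
</pre>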
</p> <div class="mw-heading mw-heading4"><h4 id="Deep_Hierarchies_in_the_Primate_Visual_Cortex">Deep Hierarchies in the Primate Visual Cortex</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;veaction=edit&amp;section=T-7" title="Edit section: Deep Hierarchies in the Primate Visual Cortex" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;action=edit&amp;section=T-7" title="Edit section&#039;s source code: Deep Hierarchies in the Primate Visual Cortex"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Wiki_hierarchies.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Wiki_hierarchies.png/400px-Wiki_hierarchies.png" decoding="async" width="400" height="774" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Wiki_hierarchies.png/600px-Wiki_hierarchies.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Wiki_hierarchies.png/800px-Wiki_hierarchies.png 2x" data-file-width="1863" data-file-height="3606" /></a><figcaption>Deep hierarchies in the visual system</figcaption></figure> <p>Despite the ever-increasing computational power of electronic systems, there are still many tasks where animals and humans are vastly superior to computers – one of them being the perception and contextualization of information. The classical computer, either the one in your phone or a supercomputer taking up the whole room, is in essence a number-cruncher. It can perform an incredible amount of calculations in a miniscule amount of time. What it lacks is creating abstractions of the information it is working with. If you attach a camera to your computer, the picture it “perceives” is just a grid of pixels, a 2-dimensional array of numbers. A human would immediately recognize the geometry of the scene, the objects in the picture, and maybe even the context of what’s going on. This ability of ours is provided by dedicated biological machinery – the visual system of the brain. It processes everything we see in a hierarchical way, starting from simpler features of the image to more complex ones all the way to classification of objects into categories. Hence the visual system is said to have a deep hierarchy. The deep hierarchy of the primate visual system has inspired computer scientists to create models of artificial neural networks that would also feature several layers where each of them creates higher generalizations of the input data. </p><p>Approximately half of the human neocortex is dedicated to vision. The processing of visual information happens over at least 10 functional levels. The neurons in the early visual areas extract simple image features over small local regions of visual space. As the information gets transmitted to higher visual areas, neurons respond to increasingly complex features. With higher levels of information processing the representations become more invariant – less sensitive to the exact feature size, rotation or position. In addition, the receptive field size of neurons in higher visual areas increases, indicating that they are tuned to more global image features. 
This hierarchical structure allows for efficient computing – different higher visual areas can use the same information computed in the lower areas. The generic scene description produced in the early visual areas is used by other parts of the brain to complete various tasks, such as object recognition and categorization, grasping, manipulation, movement planning, etc. </p> <div class="mw-heading mw-heading4"><h4 id="Sub-cortical_vision">Sub-cortical vision</h4></div> <p>The neural processing of visual information starts even before any of the cortical structures. Photoreceptors on the retina detect light and send signals to retinal ganglion cells. The receptive field size of a photoreceptor is one hundredth of a degree (a receptive field of one degree is roughly the size of your thumb when your arm is stretched out in front of you). The number of inputs to a ganglion cell, and therefore its receptive field size, depends on the location: in the center of the retina it receives signals from as few as five receptors, while in the periphery a single cell can have several thousand inputs. This implies that the highest spatial resolution is in the center of the retina, also called the fovea. Due to this property, primates possess a gaze control mechanism that directs the eyes so that the features of interest project onto the fovea. </p><p>Ganglion cells are selectively tuned to detect various features of the image, such as luminance contrast, color contrast, and the direction and speed of movement. All of these features are the primary information used further up the processing pipeline. If a visual stimulus is not detectable by ganglion cells, then it is not available to any cortical visual area either. </p><p>Ganglion cells project to a region of the thalamus called the lateral geniculate nucleus (LGN), which in turn relays the signals to the cortex. No significant computation is known to happen in the LGN – there is almost a one-to-one correspondence between retinal ganglion cells and LGN cells. However, only 5% of the inputs to the LGN come from the retina – all the other inputs are cortical feedback projections. Although the visual system is often regarded as a feed-forward system, recurrent feedback connections as well as lateral connections are a common feature throughout the visual cortex. The role of the feedback is not yet fully understood, but it has been proposed to support processes like attention, expectation, imagination and the filling-in of missing information. </p>
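<p>The luminance-contrast tuning of ganglion cells is classically modelled with a center-surround "difference-of-Gaussians" receptive field. The following Python sketch (kernel size and widths are illustrative, not fitted to any real cell) shows why such a unit responds to local contrast but ignores uniform illumination:</p> <pre>
import numpy as np

def dog_kernel(size=15, sigma_c=1.0, sigma_s=3.0):
    """Difference-of-Gaussians model of an ON-center ganglion cell:
    a narrow excitatory center minus a broad inhibitory surround."""
    ax = np.arange(size) - size // 2
    xx, yy = np.meshgrid(ax, ax)
    r2 = xx**2 + yy**2
    center = np.exp(-r2 / (2 * sigma_c**2)) / (2 * np.pi * sigma_c**2)
    surround = np.exp(-r2 / (2 * sigma_s**2)) / (2 * np.pi * sigma_s**2)
    return center - surround

def ganglion_response(patch, kernel):
    """Weighted sum of the image patch falling on the receptive field."""
    return float(np.sum(patch * kernel))

kernel = dog_kernel()
uniform = np.ones((15, 15))               # featureless illumination
spot = np.zeros((15, 15))
spot[6:9, 6:9] = 1.0                      # small bright spot over the center
print(ganglion_response(uniform, kernel)) # near zero: no contrast, no signal
print(ganglion_response(spot, kernel))    # strongly positive: local contrast
</pre>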
</p> <div class="mw-heading mw-heading4"><h4 id="Cortical_vision">Cortical vision</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;veaction=edit&amp;section=T-9" title="Edit section: Cortical vision" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;action=edit&amp;section=T-9" title="Edit section&#039;s source code: Cortical vision"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Wiki_brain_areas.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/8/86/Wiki_brain_areas.png/400px-Wiki_brain_areas.png" decoding="async" width="400" height="309" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/86/Wiki_brain_areas.png/600px-Wiki_brain_areas.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/86/Wiki_brain_areas.png/800px-Wiki_brain_areas.png 2x" data-file-width="1222" data-file-height="944" /></a><figcaption>Main areas of the visual system</figcaption></figure> <p>The visual cortex can be divided into three large parts – the occipital part which receives input from LGN and then sends outputs to dorsal and ventral streams. Occipital part includes the areas V1-V4 and MT, which process different aspects of visual information and gives rise to a generic scene representation. The dorsal pathway is involved in the analysis of space and in action planning. The ventral pathway is involved in object recognition and categorization. </p><p>V1 is the first cortical area that processes visual information. It is sensitive to edges, gratings, line-endings, motion, color and disparity (angular difference between the projections of a point onto the left and right retinas). The most straight forward example of the hierarchical bottom-up processing is the linear combination of the inputs from several ganglion cells with center-surround receptive fields to create a representation of a bar. This is done by the simple cells of V1 and was first described by the prominent neuroscientists Hubel and Wiesel. This type of information integration implies that the simple cells are sensitive to the exact location of the bar and have a relatively small receptive field. The complex cells of V1 receive inputs from the simple cells, and while also responding to linear oriented patterns they are not sensitive to the exact position of the bar and have a larger receptive field. The computation present in this step could be a MAX-like operation which produces responses similar in amplitude to the larger of the responses pertaining to the individual stimuli. Some simple and complex cells can also detect the end of a bar, and a fraction of V1 cells are also sensitive to local motion within their respective receptive fields. </p><p>Area V2 features more sophisticated contour representation including texture-defined contours, illusory contours and contours with border ownership. V2 also builds upon the absolute disparity detection in V1 and features cells that are sensitive to relative disparity which is the difference between the absolute disparities of two points in space. Area V4 receives inputs from V2 and area V3, but very little is known about the computation taking place in V3. 
<p>Area V2 features more sophisticated contour representations, including texture-defined contours, illusory contours and contours with border ownership. V2 also builds upon the absolute disparity detection in V1, featuring cells that are sensitive to relative disparity, i.e. the difference between the absolute disparities of two points in space. Area V4 receives inputs from V2 and area V3, although very little is known about the computation taking place in V3. Area V4 features neurons that are sensitive to contours with different curvatures and to vertices with particular angles. Another important feature is its coding of luminance-invariant hue. This is in contrast to V1, where neurons respond to color opponency along the two principal axes (red-green and yellow-blue) rather than to the actual color. V4 further outputs to the ventral stream, to the inferior temporal cortex (IT), which lesion studies have shown to be essential for object discrimination. </p> <div class="mw-heading mw-heading4"><h4 id="Inferior_temporal_cortex:_object_discrimination">Inferior temporal cortex: object discrimination</h4></div> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Stimulus_reduction.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/e/e2/Stimulus_reduction.png/500px-Stimulus_reduction.png" decoding="async" width="500" height="79" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/e/e2/Stimulus_reduction.png/750px-Stimulus_reduction.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/e/e2/Stimulus_reduction.png/1000px-Stimulus_reduction.png 2x" data-file-width="4000" data-file-height="629" /></a><figcaption>Stimulus reduction in area TE</figcaption></figure> <p>The inferior temporal cortex (IT) is divided into two areas: TEO and TE. Area TEO integrates information about the shapes and relative positions of multiple contour elements, and features mostly cells that respond to simple combinations of features. The receptive field size of TEO neurons is about 3–5 degrees. Area TE features cells with significantly larger receptive fields (10–20 degrees) which respond to faces, hands and complex feature configurations. Cells in TE respond to visual features that are a simpler generalization of the object of interest, but more complex than simple bars or spots. This was shown using a stimulus-reduction method by Tanaka et al., in which the response to an object is first measured and the object is then replaced by successively simpler representations until the critical feature that the TE neurons respond to is narrowed down. </p><p>It appears that the neurons in IT pull together various features of medium complexity from lower levels in the ventral stream to build models of object parts. The neurons in TE that are selective to specific objects have to fulfil two seemingly contradictory requirements – selectivity and invariance. They have to distinguish between different objects by means of sensitivity to features in the retinal images. However, the same object can be viewed from different angles and distances under different lighting conditions, yielding highly dissimilar retinal images of the same object. To treat all these images as equivalent, invariant features must be derived that are robust against certain transformations, such as changes in position, illumination or size on the retina.
Neurons in area TE show invariance to position and size, as well as to partial occlusion, position-in-depth and illumination direction. Rotation in depth has been shown to have the weakest invariance, except when the object is a human face. </p><p>Object categories are not yet explicitly present in area TE – a neuron might typically respond to several but not all exemplars of the same category (e.g., images of trees), and it might also respond to exemplars of different categories (e.g., trees and non-trees). Object recognition and classification most probably involve sampling from a larger population of TE neurons, as well as inputs from additional brain areas, e.g., those responsible for understanding the context of the scene. Recent readout experiments have demonstrated that statistical classifiers (e.g. support vector machines) can be trained to classify objects based on the responses of a small number of TE neurons. Therefore, a population of TE neurons can in principle reliably signal object categories by its combined activity. Interestingly, there are also reports of highly selective neurons in the medial temporal lobe that respond to very specific cues, e.g., to the tower of Pisa in different images or to a particular person’s face. </p>
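<p>A sketch of such a population read-out on simulated data (the population size, noise level and response model below are invented for illustration and are not taken from the cited experiments):</p> <pre>
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
n_neurons, n_trials = 60, 200          # a small simulated "TE" population

# each of two object categories evokes a characteristic but noisy
# response pattern; no single neuron is category-selective on its own
prototypes = rng.normal(size=(2, n_neurons))
labels = rng.integers(0, 2, size=n_trials)
responses = prototypes[labels] + rng.normal(scale=2.0, size=(n_trials, n_neurons))

X_train, X_test, y_train, y_test = train_test_split(
    responses, labels, test_size=0.5, random_state=0)
clf = LinearSVC().fit(X_train, y_train)
print("read-out accuracy:", clf.score(X_test, y_test))   # well above chance
</pre> <p>Even though no single simulated neuron signals the category by itself, the classifier recovers it reliably from the combined activity – the essence of the population-coding argument above.</p>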
</p> <div class="mw-heading mw-heading4"><h4 id="Deep_Neural_Networks">Deep Neural Networks</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;veaction=edit&amp;section=T-12" title="Edit section: Deep Neural Networks" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;action=edit&amp;section=T-12" title="Edit section&#039;s source code: Deep Neural Networks"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Similarly to the deep hierarchy of the primate visual system, deep learning architectures attempt to model high-level abstractions of the input data by using multiple levels of non-linear transformations. The model proposed by Hubel and Wiesel where information is integrated and propagated in a cascade from retina and LGN to simple cells and complex cells in V1 inspired the creation of one of the first deep learning architectures, the neocognitron – a multilayered artificial neural network model. It was used for different pattern recognition tasks, including the recognition of handwritten characters. However, it took a lot of time to train the network (in the order of days) and since its inception in the 1980s deep learning didn’t get much attention until the mid-2000s with the abundance of digital data and the invention of faster training algorithms. Deep neural networks have proved themselves to be very effective in tasks that not so long ago seemed possible only for humans to perform, such as recognizing the faces of particular people in photos, understanding human speech (to some extent) and translating text from foreign languages. Furthermore, they have proven to be of great assistance in industry and science to search for potential drug candidates, map real neural networks in the brain and predict the functions of proteins. It must be noted that deep learning is only very loosely inspired from the brain and is much more of an achievement of the field of computer science / machine learning than of neuroscience. The basic parallels are that the deep neural networks are composed of units that integrate information inputs in a non-linear manner (neurons) and send signals to each other (synapses) and that there are different levels of increasingly abstract representations of the data. The learning algorithms and mathematical descriptions of the “neurons” used in deep learning are very different from the actual processes taking place in the brain. Therefore, the research in deep learning, while giving a huge push to a more sophisticated artificial intelligence, can give only limited insights about the brain. 
</p> <table border="0" align="center"> <tbody><tr> <td> <figure class="mw-halign-left" typeof="mw:File/Thumb"><a href="/wiki/File:Example_of_a_neuron.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Example_of_a_neuron.png/400px-Example_of_a_neuron.png" decoding="async" width="400" height="221" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Example_of_a_neuron.png/600px-Example_of_a_neuron.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/94/Example_of_a_neuron.png/800px-Example_of_a_neuron.png 2x" data-file-width="1402" data-file-height="774" /></a><figcaption>Example of a neuron with its main components.</figcaption></figure> </td> <td> <figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Example_of_a_neural_network%27s_neural_unit.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/3/39/Example_of_a_neural_network%27s_neural_unit.png/400px-Example_of_a_neural_network%27s_neural_unit.png" decoding="async" width="400" height="212" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/39/Example_of_a_neural_network%27s_neural_unit.png/600px-Example_of_a_neural_network%27s_neural_unit.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/39/Example_of_a_neural_network%27s_neural_unit.png/800px-Example_of_a_neural_network%27s_neural_unit.png 2x" data-file-width="1420" data-file-height="752" /></a><figcaption>Example of a base unit of the neural networks. In the example the activation function is a Rectified Linear Unit (ReLU), but there are also other possibilities, among which the sigmoid or the hyperbolic tangent. The bias changes the threshold of activation of the unit, and as such it is analogous to the value of the threshold for the action potential in the neuron.</figcaption></figure> </td> </tr> </tbody></table> <figure class="mw-halign-center" typeof="mw:File/Thumb"><a href="/wiki/File:Example_of_a_deep_neural_network.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/2/2f/Example_of_a_deep_neural_network.png/500px-Example_of_a_deep_neural_network.png" decoding="async" width="500" height="280" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/2/2f/Example_of_a_deep_neural_network.png/750px-Example_of_a_deep_neural_network.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/2/2f/Example_of_a_deep_neural_network.png/1000px-Example_of_a_deep_neural_network.png 2x" data-file-width="1114" data-file-height="624" /></a><figcaption>Example of a deep neural network. 
<div class="mw-heading mw-heading4"><h4 id="References">References</h4></div> <dl><dt>Papers on the deep hierarchies in the visual system</dt></dl> <ul><li><cite id="CITEREFKrugerJanssenKalkanLappe2013" class="citation journal cs1">Kruger, N.; Janssen, P.; Kalkan, S.; Lappe, M.; Leonardis, A.; Piater, J.; Rodriguez-Sanchez, A. J.; Wiskott, L. (August 2013). "Deep Hierarchies in the Primate Visual Cortex: What Can We Learn for Computer Vision?". <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>. <b>35</b> (8): 1847–1871. doi:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTPAMI.2012.272">10.1109/TPAMI.2012.272</a>.</cite></li> <li><cite id="CITEREFPoggioRiesenhuber1999" class="citation journal cs1">Riesenhuber, Maximilian; Poggio, Tomaso (1 November 1999). "Hierarchical models of object recognition in cortex". <i>Nature Neuroscience</i>. <b>2</b> (11): 1019–1025. doi:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F14819">10.1038/14819</a>.</cite></li></ul> <dl><dt>Stimulus reduction experiment</dt></dl> <ul><li><cite id="CITEREFTanaka1996" class="citation journal cs1">Tanaka, Keiji (March 1996). "Inferotemporal Cortex and Object Vision". <i>Annual Review of Neuroscience</i>. <b>19</b> (1): 109–139. doi:<a rel="nofollow" class="external text" href="https://doi.org/10.1146%2Fannurev.ne.19.030196.000545">10.1146/annurev.ne.19.030196.000545</a>.</cite></li></ul> <dl><dt>Evidence on learning in the visual system</dt></dl> <ul><li><cite id="CITEREFLiDiCarlo2010" class="citation journal cs1">Li, Nuo; DiCarlo, James J. (23 September 2010). "Unsupervised Natural Visual Experience Rapidly Reshapes Size-Invariant Object Representation in Inferior Temporal Cortex". <i>Neuron</i>. <b>67</b> (6): 1062–1075. doi:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.neuron.2010.08.029">10.1016/j.neuron.2010.08.029</a>.</cite></li> <li><cite id="CITEREFRaiguelVogelsMysoreOrban2006" class="citation journal cs1">Raiguel, S.; Vogels, R.; Mysore, S. G.; Orban, G. A. (14 June 2006). "Learning to See the Difference Specifically Alters the Most Informative V4 Neurons". <i>Journal of Neuroscience</i>. <b>26</b> (24): 6589–6602. doi:<a rel="nofollow" class="external text" href="https://doi.org/10.1523%2FJNEUROSCI.0457-06.2006">10.1523/JNEUROSCI.0457-06.2006</a>.</cite></li> <li><cite id="CITEREFSchoupsVogelsQianOrban2001" class="citation journal cs1">Schoups, A.; Vogels, R.; Qian, N.; Orban, G. (2 August 2001). "Practising orientation identification improves orientation coding in V1 neurons". <i>Nature</i>. <b>412</b> (6846): 549–553. PMID&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/11484056">11484056</a>.</cite></li></ul> <dl><dt>A recent and accessible overview of the status quo of deep learning research</dt></dl> <ul><li><cite id="CITEREFJones2014" class="citation journal cs1">Jones, Nicola (8 January 2014). "Computer science: The learning machines". <i>Nature</i>. <b>505</b> (7482): 146–148. doi:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F505146a">10.1038/505146a</a>.</cite></li></ul> <div class="mw-heading mw-heading3"><h3 id="Motion_Perception">Motion Perception</h3></div> <p>Motion perception is the process of inferring the speed and direction of moving objects. Area V5 in humans (the homologue of area MT, Middle Temporal, in other primates) is responsible for the cortical perception of motion. Area V5 is part of the extrastriate cortex, the region of the occipital lobe next to the primary visual cortex. Its function is to detect the speed and direction of visual stimuli and to integrate local visual motion signals into global motion. Area V1, or the primary visual cortex, is located in the occipital lobe of both hemispheres. It performs the first stage of cortical processing of visual information and contains a complete map of the visual field covered by the eyes. The difference between area V5 and area V1 is that area V5 can integrate the motion of local signals, or individual parts of an object, into the global motion of the entire object. Area V1, on the other hand, responds to local motion that occurs within each receptive field.
The estimates from these many neurons are then integrated in area V5. </p><p>Movement is defined as a change in retinal illumination over space and time. Motion signals are classified into <i>first-order motions</i> and <i>second-order motions</i>; these motion types are briefly described in the following paragraphs. </p> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Beta_movement.gif" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/0/0b/Beta_movement.gif" decoding="async" width="220" height="220" class="mw-file-element" data-file-width="220" data-file-height="220" /></a><figcaption>Example of a "Beta movement".</figcaption></figure> <p><i>First-order motion perception</i> refers to the motion perceived when two or more visual stimuli switch on and off over time and produce different motion percepts. First-order motion is also termed "apparent motion", and it is used in television and film. An example is the "beta movement", an illusion in which fixed images seem to move even though they do not move in reality. The images give the appearance of motion because they change faster than the eye can detect. This optical illusion arises because the human visual system responds to changes of light at about ten cycles per second, so any change faster than this rate is registered as continuous motion rather than as separate images. </p><p><i>Second-order motion</i> refers to motion in which the moving contour is defined by contrast, texture, flicker or some other quality that does not result in an increase in the luminance or motion energy of the image. Evidence suggests that the early processing of first-order and second-order motion is carried out by separate pathways. Second-order mechanisms have poorer temporal resolution and are low-pass in terms of the range of spatial frequencies to which they respond; second-order motion also produces a weaker motion aftereffect. First- and second-order signals are combined in area V5. </p><p>In this chapter, we will analyze the concepts of motion perception and motion analysis, and explain why these terms should not be used interchangeably. We will analyze the mechanisms by which motion is perceived, namely motion sensors and feature tracking. There exist three main theoretical models that attempt to describe the function of neuronal motion sensors. Experimental tests have been conducted to check whether these models are accurate, but their results are inconclusive: no single one of these models describes the functioning of motion sensors entirely, although each of them simulates certain of their features. Some properties of these sensors are described. Finally, this chapter presents some motion illusions, which demonstrate that our sense of motion can be misled by static external factors that stimulate motion sensors in the same way as motion does. </p>
</p> <div class="mw-heading mw-heading5"><h5 id="Motion_Analysis_and_Motion_Perception">Motion Analysis and Motion Perception</h5><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;veaction=edit&amp;section=T-15" title="Edit section: Motion Analysis and Motion Perception" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;action=edit&amp;section=T-15" title="Edit section&#039;s source code: Motion Analysis and Motion Perception"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The concepts of <i>Motion Analysis</i> and <i>Motion Perception</i> are often confused as interchangeable. Motion Perception and Motion Analysis are important to each other, but they are not the same. </p><p><i>Motion Analysis</i> refers to the mechanisms in which motion signals are processed. In a similar way in which Motion Perception does not necessarily depend on signals generated by motion of images in the retina, Motion Analysis may or may not lead to motion perception. An example of this phenomenon is Vection, which occurs when a person perceives that she is moving when she is stationary, but the object that she observes is moving. Vection shows that motion of an object can be analyzed, even though it is not perceived as motion coming from the object. This definition of Motion analysis suggests that motion is a fundamental image property. In the visual field, it is analyzed at every point. The results from this analysis are used to derive perceptual information. </p><p><i>Motion Perception</i> refers to the process of acquiring perceptual knowledge about motion of objects and surfaces in an image. Motion is perceived either by delicate local sensors in the retina or by feature tracking. Local motion sensors are specialized neurons sensitive to motion, and analogous to specialized sensors for color. Feature tracking is an indirect way to perceive motion, and it consists of inferring motion from changes in retinal position of objects over time. It is also referred to as third order motion analysis. Feature tracking works by focusing attention to a particular object and observing how its position has changed over time. </p> <div class="mw-heading mw-heading5"><h5 id="Motion_Sensors">Motion Sensors</h5><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;veaction=edit&amp;section=T-16" title="Edit section: Motion Sensors" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Signal_Processing&amp;action=edit&amp;section=T-16" title="Edit section&#039;s source code: Motion Sensors"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Detection of motion is the first stage of visual processing, and it happens thanks to specialized neural processes, which respond to information regarding local changes of intensity of images over time. Motion is sensed independently of other image properties at all locations in the image. It has been proven that motion sensors exist, and they operate locally at all points in the image. 
Motion sensors are dedicated neuronal sensors, located in the retina, that are capable of detecting motion produced by two brief and small light flashes so close together that they could not be detected by feature tracking. There exist three main models that attempt to describe the way these specialized sensors work. These models are independent of one another, and they try to model specific characteristics of motion perception. Although there is not sufficient evidence to support that any one of these models represents the way the visual system (motion sensors in particular) perceives motion, they still correctly model certain functions of these sensors. </p> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Motion_Detectors.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/2/2e/Motion_Detectors.png/300px-Motion_Detectors.png" decoding="async" width="300" height="192" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/2/2e/Motion_Detectors.png/450px-Motion_Detectors.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/2/2e/Motion_Detectors.png/600px-Motion_Detectors.png 2x" data-file-width="1000" data-file-height="641" /></a><figcaption>Two different mechanisms for motion detection. Left: a "Reichardt detector" consists of two mirror-symmetrical subunits. In each subunit, the luminance values measured at two adjacent points are multiplied (M) with each other after one of them has been delayed by a low-pass filter with time constant τ. The resulting output signals of the multipliers are then subtracted. Right: in the gradient detector, the temporal luminance gradient measured at one photoreceptor (δI/δt) is divided by the spatial luminance gradient (δI/δx); here the spatial gradient is approximated by the difference between the luminance values at two adjacent points.</figcaption></figure> <p><i>The Reichardt Detector</i> </p><p>The Reichardt detector is used to model how motion sensors respond to <i>first-order motion signals</i>. When an object moves from point A in the visual field to point B, two signals are generated: one before the movement began and another after the movement has completed. This model perceives the motion by detecting a change in luminance at one point on the retina and correlating it with a change in luminance at another point nearby, after a short delay. The Reichardt detector operates on the principle of correlation (a statistical relation involving dependency): it interprets a motion signal by the spatiotemporal correlation of luminance signals at neighboring points. It exploits the fact that two receptive fields at different points on the trajectory of a moving object receive time-shifted versions of the same signal – a luminance pattern moves along an axis, and the signal at one point on the axis is a time-shifted version of the earlier signal at the previous point. The Reichardt detector model has two spatially separate, neighboring detectors whose output signals are multiplied (correlated) as follows: one signal is multiplied by a time-shifted version of the other. The same procedure is repeated for the reverse direction of motion (the signal that was time-shifted becomes the first signal, and vice versa). The difference between these two multiplications is then taken, and the sign and magnitude of the outcome reflect the direction and speed of motion. The response of the detector depends upon the stimulus’s phase, contrast and speed, so many detectors tuned to different speeds are necessary to encode the true speed of the pattern. The most compelling experimental evidence for this kind of detector comes from studies of direction discrimination of barely visible targets. </p>
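<p>A discrete-time sketch of this correlation scheme in Python (the first-order low-pass filter stands in for the delay; all constants are illustrative):</p> <pre>
import numpy as np

def reichardt(left, right, tau=2.0):
    """Reichardt detector for two adjacent luminance signals.

    Each input is low-pass filtered (the delay) and multiplied with the
    undelayed signal from the neighbouring point; the two mirror-symmetric
    products are then subtracted.  Positive output signals motion from
    `left` towards `right`, negative output the reverse.
    """
    alpha = 1.0 / tau
    dl, dr = np.zeros_like(left), np.zeros_like(right)
    for t in range(1, len(left)):            # first-order low-pass = delay
        dl[t] = dl[t-1] + alpha * (left[t-1] - dl[t-1])
        dr[t] = dr[t-1] + alpha * (right[t-1] - dr[t-1])
    return float(np.mean(dl * right - dr * left))

# a luminance blip that passes the left receptor, then the right one
t = np.arange(40.0)
blip = np.exp(-0.5 * ((t - 10) / 2.0) ** 2)
later_blip = np.exp(-0.5 * ((t - 14) / 2.0) ** 2)
print(reichardt(blip, later_blip))   # positive: left-to-right motion
print(reichardt(later_blip, blip))   # negative: right-to-left motion
</pre>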
<p><i>Motion-Energy Filtering</i> </p><p>The motion-energy filter is a model of motion sensors based on the principle of phase-invariant filters. This model builds spatio-temporal filters oriented in space-time to match the structure of moving patterns. It consists of separable filters, whose spatial profiles keep the same shape over time but are scaled by the value of the temporal filters. Motion-energy filters match the structure of moving patterns by adding separable filters together. For each direction of motion, two space-time filters are generated: a symmetric (bar-like) one and an asymmetric (edge-like) one. The sum of the squares of these filters is called the motion energy, and the difference between the signals for the two directions is called the opponent energy. This result is then divided by the squared output of another filter, tuned to static contrast, to take the effect of contrast on motion into account. Motion-energy filters can model a number of motion phenomena, but they produce a phase-independent measurement which increases with speed without giving a reliable value of the speed itself. </p><p><i>Spatiotemporal Gradients</i> </p> <dl><dd>\( v = \frac{dx}{dt} = -\frac{\partial I(x,t)/\partial t}{\partial I(x,t)/\partial x} = -\frac{D_{t}I}{D_{x}I} \)</dd></dl> <p>This model of motion sensors was originally developed in the field of computer vision, and it is based on the principle that the ratio of the temporal derivative of image brightness to the spatial derivative of image brightness gives the speed of motion, as in the formula above. It is important to note that at the peaks and troughs of the image this model cannot compute an adequate answer, because the derivative in the denominator is zero. To solve this problem, higher-order derivatives with respect to space and time can also be analyzed. The spatiotemporal-gradient approach is a good model for determining the speed of motion at all points in the image. </p>
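<p>A direct implementation of this gradient scheme in Python. The pooled variant below is one standard least-squares remedy for the division-by-zero problem at luminance peaks and troughs, offered as an illustration rather than as the visual system's actual solution:</p> <pre>
import numpy as np

def gradient_speed(I, x, t):
    """Point estimate of speed: v = -(dI/dt) / (dI/dx).
    Undefined wherever the spatial derivative is zero (peaks, troughs)."""
    It = I[t+1, x] - I[t, x]                 # temporal derivative
    Ix = (I[t, x+1] - I[t, x-1]) / 2.0       # spatial derivative
    return -It / Ix

def gradient_speed_pooled(I, t):
    """Least-squares pooling over all positions avoids the zero
    denominator; the text's alternative is higher-order derivatives."""
    It = I[t+1, 1:-1] - I[t, 1:-1]
    Ix = (I[t, 2:] - I[t, :-2]) / 2.0
    return -np.sum(It * Ix) / np.sum(Ix * Ix)

# a sinusoidal luminance pattern drifting at 0.5 pixels per frame
x = np.arange(64)
I = np.array([np.sin(0.3 * (x - 0.5 * t)) for t in range(10)])
print(gradient_speed(I, 20, 4))       # close to 0.5, away from peaks
print(gradient_speed_pooled(I, 4))    # ~0.5, stable at every time step
</pre>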
<div class="mw-heading mw-heading5"><h5 id="Motion_Sensors_are_Orientation-Selective">Motion Sensors are Orientation-Selective</h5></div> <p>One of the properties of motion sensors is orientation selectivity, which constrains motion analysis to a single dimension. Motion sensors can only record motion in one dimension, along an axis orthogonal to the sensor’s preferred orientation. A stimulus that contains features of a single orientation can only be seen to move in a direction orthogonal to the stimulus’s orientation. One-dimensional motion signals therefore give ambiguous information about the motion of two-dimensional objects, and a second stage of motion analysis is necessary to resolve the true direction of motion of a 2-D object or pattern: 1-D motion signals from sensors tuned to different orientations are combined to produce an unambiguous 2-D motion signal. The analysis of 2-D motion depends on signals from local broadly oriented sensors as well as on signals from narrowly oriented sensors. </p> <div class="mw-heading mw-heading5"><h5 id="Feature_Tracking">Feature Tracking</h5></div> <p>Another way in which we perceive motion is through feature tracking. Feature tracking consists of analyzing whether the local features of an object have changed position, and inferring movement from this change. In this section, some properties of feature trackers are mentioned. </p><p>Feature trackers fail when a moving stimulus occurs very rapidly. On the other hand, they have the advantage over motion sensors that they can perceive the movement of an object even if the movement is separated by intermittent blank intervals. They can also separate the two stages (movements and blank intervals), whereas motion sensors would simply integrate the blanks with the moving stimulus and see one continuous movement. Feature trackers operate on the locations of identified features; for that reason, they have a minimum distance threshold that matches the precision with which the locations of features can be discriminated. Feature trackers do not show motion aftereffects – visual illusions caused by visual adaptation, in which, after one has observed a moving stimulus, a stationary object appears to move in the opposite direction. Finally, it is impossible for this mechanism to monitor multiple motions in different parts of the visual field at the same time, whereas multiple motions are not a problem for motion sensors, which operate in parallel across the entire visual field. </p><p>Experiments based on the properties above have reached interesting conclusions about feature trackers. Experiments with brief stimuli have shown that color patterns and contrast patterns at high contrast are perceived not by feature trackers but by motion sensors. Experiments with blank intervals have confirmed that feature tracking can occur with blank intervals in the display. It is only at high contrast that motion sensors analyze the motion of chromatic stimuli and contrast envelopes; at low contrast, feature trackers analyze the motion of both chromatic patterns and contrast envelopes. Experiments in which subjects make multiple motion judgments suggest that feature tracking occurs under conscious control, and that it is the only way we have to analyze the motion of contrast envelopes in low-contrast displays. These results are consistent with the view that the motion of contrast envelopes and color patterns depends on feature tracking, except when colors are well above threshold or the mean contrast is high. The main conclusion of these experiments is that it is probably feature tracking that allows the perception of contrast envelopes and color patterns. </p> <div class="mw-heading mw-heading5"><h5 id="Motion_Illusions">Motion Illusions</h5></div> <p>As a consequence of the way motion detection works, some static images may seem to us to be moving. Such images give an insight into the assumptions the visual system makes, and are called visual illusions. </p><p>A famous motion illusion related to first-order motion signals is the phi phenomenon, an optical illusion that makes us perceive movement instead of a sequence of images. It is this phenomenon that allows us to watch movies as a continuum rather than as separate images: a sequence of frozen images changed at a constant speed is seen as constant movement.
The phi phenomenon should not be confused with beta movement: the former is an apparent movement caused by luminous impulses in a sequence, while the latter is an apparent movement caused by stationary luminous impulses.

Motion illusions occur when motion sensing, motion analysis, and the interpretation of these signals are misled, and the visual system creates illusions about motion. These illusions can be classified according to the process that allows them to happen: motion sensing, 2-D integration, or 3-D interpretation.

The most popular illusions concerning motion sensing are four-stroke motion, random-dot kinematograms (RDKs), and illusions based on second-order motion signals. The most popular motion illusions concerning 2-D integration are motion capture, plaid motion, and direction repulsion. Similarly, the ones concerning 3-D interpretation are transformational motion, kinetic depth, shadow motion, biological motion, stereokinetic motion, implicit-figure motion, and two-stroke motion. There are far more motion illusions, and they all show something interesting about human motion detection, perception, and analysis mechanisms. For more information, visit the following link: http://www.lifesci.sussex.ac.uk/home/George_Mather/Motion/

===== Open Problems =====

Although many of the specifics of motion perception remain unresolved, understanding the mechanisms by which motion is perceived, as well as motion illusions, gives the reader a good overview of the state of the art in the subject. Among the open problems are the mechanisms that form 3-D images in global motion and the aperture problem.

Motion signals from the retina are integrated to arrive at a two-dimensional global motion signal; however, it is unclear how three-dimensional global motion is formed. The aperture problem occurs because each receptive field in the visual system covers only a small piece of the visual world, which leads to ambiguities in perception: a moving contour, when observed locally, is consistent with many different motions. This ambiguity is geometric in origin. Motion parallel to the contour cannot be detected, since changes in this component of the motion do not change the image observed through the aperture; the only component that can be measured is the velocity orthogonal to the contour orientation. For that reason, the velocity of the movement could be anything from the family of motions along a line in velocity space. The aperture problem is observed not only with straight contours, but also with smoothly curved ones, since these are approximately straight when observed locally.
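The geometry can be made concrete. Each local measurement constrains the true velocity v to a line in velocity space, n · v = s, where n is the unit normal to the contour and s is the measured normal speed; two such constraints from differently oriented contours intersect in a single velocity. The sketch below is a minimal NumPy illustration with made-up angles and speeds, in the spirit of the classical "intersection of constraints" construction, and recovers a known 2-D velocity from two aperture measurements.

```python
import numpy as np

def normal(theta):
    """Unit normal of a contour whose normal direction is theta (radians)."""
    return np.array([np.cos(theta), np.sin(theta)])

def intersect_constraints(theta1, s1, theta2, s2):
    """Recover a 2-D velocity from two aperture measurements.

    Each measurement constrains the velocity v to the line n_i . v = s_i;
    two non-parallel contours pin v down uniquely.
    """
    N = np.stack([normal(theta1), normal(theta2)])
    return np.linalg.solve(N, np.array([s1, s2]))   # fails for parallel contours

# Toy example: a pattern translating with true velocity (3, 1).
v_true = np.array([3.0, 1.0])
t1, t2 = 0.2, 1.3                                   # two contour orientations
s1, s2 = normal(t1) @ v_true, normal(t2) @ v_true   # what each aperture "sees"
print(intersect_constraints(t1, s1, t2, s2))        # -> [3. 1.]
```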
Although the mechanisms the visual system uses to solve the aperture problem are still unknown, there are hypotheses about how it could be solved: for example, by combining information across space or from different contours of the same object, as the sketch above illustrates.

===== Conclusions =====

In this chapter we introduced motion perception and the mechanisms by which our visual system detects motion. Motion illusions showed how motion signals can be misleading and consequently lead to incorrect conclusions about motion. It is important to remember that motion perception and motion analysis are not the same: motion sensors and feature trackers complement each other to let the visual system perceive motion.

Motion perception is complex and still an open area of research. This chapter has described models of the way motion sensors function and hypotheses about the characteristics of feature trackers; however, more experiments are necessary to learn about these mechanisms and to construct models that resemble the actual processes of the visual system more accurately.

The variety of mechanisms of motion analysis and motion perception described in this chapter, as well as the sophistication of the artificial models designed to describe them, demonstrate the great complexity in the way the cortex processes signals from the outside environment. Thousands of specialized neurons integrate and interpret pieces of local signals to form global images of moving objects in our brain. That so many actors and processes in our bodies must work in concert to perceive motion makes it all the more remarkable that we humans do it with such ease.
</p> <div class="noprint toclimit-3" style="float:left; margin:0.25em 0.5em 0.5em 0.25em; padding:0.5em 1.4em 0.8em 0em; background:transparent;"></div> <div class="mw-heading mw-heading3"><h3 id="Color_Perception">Color Perception</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;veaction=edit&amp;section=T-1" title="Edit section: Color Perception" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;action=edit&amp;section=T-1" title="Edit section&#039;s source code: Color Perception"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading4"><h4 id="Introduction_2">Introduction</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;veaction=edit&amp;section=T-2" title="Edit section: Introduction" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;action=edit&amp;section=T-2" title="Edit section&#039;s source code: Introduction"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Humans (together with primates like monkeys and gorillas) have the best color perception among mammals <sup id="cite_ref-conway2009color_1-0" class="reference"><a href="#cite_note-conway2009color-1"><span class="cite-bracket">&#91;</span>1<span class="cite-bracket">&#93;</span></a></sup> . Hence, it is not a coincidence that color plays an important role in a wide variety of aspects. For example, color is useful for discriminating and differentiating objects, surfaces, natural scenery, and even faces <sup id="cite_ref-russell2007real_2-0" class="reference"><a href="#cite_note-russell2007real-2"><span class="cite-bracket">&#91;</span>2<span class="cite-bracket">&#93;</span></a></sup>,<sup id="cite_ref-gegenfurtner2000sensory_3-0" class="reference"><a href="#cite_note-gegenfurtner2000sensory-3"><span class="cite-bracket">&#91;</span>3<span class="cite-bracket">&#93;</span></a></sup>. Color is also an important tool for nonverbal communication, including that of emotion <sup id="cite_ref-changizi2006bare_4-0" class="reference"><a href="#cite_note-changizi2006bare-4"><span class="cite-bracket">&#91;</span>4<span class="cite-bracket">&#93;</span></a></sup>. </p><p>For many decades, it has been a challenge to find the links between the physical properties of color and its perceptual qualities. Usually, these are studied under two different approaches: the behavioral response caused by color (also called <i>psychophysics</i>) and the actual physiological response caused by it <sup id="cite_ref-beretta2000understanding_5-0" class="reference"><a href="#cite_note-beretta2000understanding-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup>. </p><p>Here we will only focus on the latter. The study of the physiological basis of color vision, about which practically nothing was known before the second half of the twentieth century, has advanced slowly and steadily since 1950. Important progress has been made in many areas, especially at the receptor level. 
Thanks to molecular biology methods, it has been possible to reveal previously unknown details concerning the genetic basis of the cone pigments. Furthermore, more and more cortical regions have been shown to be influenced by visual stimuli, although the correlation of color perception with wavelength-dependent physiological activity beyond the receptors is not so easy to discern [6].

In this chapter, we aim to explain the basics of the different processes of color perception along the visual path, from the retina in the eye to the visual cortex in the brain. For anatomical details, please refer to Sec. "Anatomy of the Visual System" of this Wikibook.

==== Color Perception at the Retina ====

All colors that can be discriminated by humans can be produced by the mixture of just three primary (basic) colors. Inspired by this idea of color mixing, it has been proposed that color is subserved by three classes of sensors, each having maximal sensitivity to a different part of the visible spectrum [1]. It was first explicitly proposed in 1853 that there are three degrees of freedom in normal color matching [7]. This was later confirmed in 1886 [8], with results remarkably close to those of recent studies [9],[10].

These proposed color sensors are in fact the so-called cones [11]. (Note: in this chapter we will only deal with cones. Rods contribute to vision only at low light levels; although they are known to have an effect on color perception, their influence is very small and can be ignored here.)
Cones are one of the two types of photoreceptor cells found in the retina, and they are concentrated especially densely in the fovea. The table below lists the three types of cone cells, distinguished by the type of rhodopsin pigment they contain. Their corresponding absorption curves are shown in the figure below.

{| class="wikitable"
|+ Table 1: General overview of the cone types found in the retina.
! Name !! Higher sensitivity to color !! Absorption curve peak [nm]
|-
| S, SWS, B || Blue || 420
|-
| M, MWS, G || Green || 530
|-
| L, LWS, R || Red || 560
|}

[Figure: Absorption curves for the different cones. Blue, green, and red represent the absorption of the S (420 nm), M (530 nm), and L (560 nm) cones, respectively.]

Although no consensus has been reached on naming the different cone types, the most widely used designations refer either to the peak of their action spectra or to the color to which they are themselves most sensitive (red, green, blue) [6]. In this text, we will use the S-M-L designation (for short, medium, and long wavelength), since these names are more appropriately descriptive. The blue-green-red nomenclature is somewhat misleading, since all types of cones are sensitive to a large range of wavelengths.

An important feature of the three cone types is their relative distribution in the retina. S-cones are present at a relatively low concentration throughout the retina and are completely absent in the most central area of the fovea. They are in fact too widely spaced to play an important role in spatial vision, although they are capable of mediating weak border perception [12]. The fovea is dominated by L- and M-cones, whose proportion is usually measured as the L/M ratio.
Different values have been reported for this ratio, ranging from 0.67 [13] up to 2 [14], the latter being the most widely accepted. Why L-cones almost always outnumber the M-cones remains unclear. Surprisingly, the relative cone ratio has almost no significant impact on color vision. This clearly shows that the brain is plastic, capable of making sense of whatever cone signals it receives [15],[16].

It is also important to note the overlap of the L- and M-cone absorption spectra. While the S-cone absorption spectrum is clearly separated, the L- and M-cone peaks lie only about 30 nm apart, and their spectral curves overlap significantly as well. This results in a high correlation in the photon catches of these two cone classes. The explanation is that, in order to achieve the highest possible acuity at the center of the fovea, the visual system treats L- and M-cones equally, without taking their absorption spectra into account; any difference between them therefore degrades the luminance signal [17]. In other words, the small separation between L- and M-cone spectra can be interpreted as a compromise between the need for high-contrast color vision and the need for high-acuity luminance vision. This is congruent with the lack of S-cones in the central part of the fovea, where visual acuity is highest. The close spacing of the L- and M-cone absorption spectra may also be explained by their genetic origin: both cone types are assumed to have evolved "recently" (about 35 million years ago) from a common ancestor, while the S-cones presumably split off from the ancestral receptor much earlier [11].

The spectral absorption functions of the three different types of cone cells are the hallmark of human color vision. This theory solved a long-standing problem: although we can see millions of different colors (humans can distinguish between 7 and 10 million different colors [5]), our retinas simply do not have enough space to accommodate an individual detector for every color at every retinal location.
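This three-detector encoding can be illustrated numerically. To a first approximation, each cone class reports a single number: the stimulus spectrum weighted by that class's absorption curve. Everything downstream sees only these three numbers. The sketch below uses Gaussian stand-ins for the real absorption curves (an assumption made for brevity; the true curves are broader and asymmetric) and constructs two physically different spectra with identical (S, M, L) responses, i.e. a metameric pair.

```python
import numpy as np

wl = np.linspace(400.0, 700.0, 301)        # wavelength grid [nm]

def toy_absorption(peak, width=40.0):
    """Gaussian stand-in for a cone absorption curve."""
    return np.exp(-0.5 * ((wl - peak) / width) ** 2)

A = np.stack([toy_absorption(420.0),       # S
              toy_absorption(530.0),       # M
              toy_absorption(560.0)])      # L

spec1 = np.exp(-0.5 * ((wl - 550.0) / 60.0) ** 2)   # some smooth spectrum
q = np.sin(wl / 15.0)                               # arbitrary perturbation
q_null = q - np.linalg.pinv(A) @ (A @ q)            # part the cones cannot see
spec2 = spec1 + 0.05 * q_null                       # a different spectrum ...

print(A @ spec1)   # (S, M, L) responses of spec1
print(A @ spec2)   # ... with identical responses: spec1 and spec2 are metamers
```

(Discrete dot products stand in for the integral over wavelength, and a real metamer would also have to remain nonnegative; both simplifications are harmless for the point being made.)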
</p> <div class="mw-heading mw-heading4"><h4 id="From_the_Retina_to_the_Brain">From the Retina to the Brain</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;veaction=edit&amp;section=T-4" title="Edit section: From the Retina to the Brain" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;action=edit&amp;section=T-4" title="Edit section&#039;s source code: From the Retina to the Brain"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The signals that are transmitted from the retina to higher levels are not simple point-wise representations of the receptor signals, but rather consist of sophisticated combinations of the receptor signals. The objective of this section is to provide a brief of the paths that some of this information takes. </p><p>Once the optical image on the retina is transduced into chemical and electrical signals in the photoreceptors, the amplitude-modulated signals are converted into frequency-modulated representations at the ganglion-cell and higher levels. In these neural cells, the magnitude of the signal is represented in terms of the number of spikes of voltage per second fired by the cell rather than by the voltage difference across the cell membrane. In order to explain and represent the physiological properties of these cells, we will find the concept of receptive fields very useful. </p><p>A <i>receptive field</i> is a graphical representation of the area in the visual field to which a given cell responds. Additionally, the nature of the response is typically indicated for various regions in the receptive field. For example, we can consider the receptive field of a photoreceptor as a small circular area representing the size and location of that particular receptor's sensitivity in the visual field. The Figure below shows exemplary receptive fields for ganglion cells, typically in a center-surround antagonism. The left receptive field in the figure illustrates a positive central response (know as <i>on-center</i>). This kind of response is usually generated by a positive input from a single cone surrounded by a negative response generated from several neighboring cones. Therefore, the response of this ganglion cell would be made up of inputs from various cones with both positive and negative signs. In this way, the cell not only responds to points of light, but serves as an edge (or more correctly, a spot) detector. In analogy to the computer vision terminology, we can think of the ganglion cell responses as the output of a convolution with an edge-detector kernel. The right receptive field of in the figure illustrates a negative central response (know as <i>off-center</i>), which is equally likely. Usually, on-center and off-center cells will occur at the same spatial location, fed by the same photoreceptors, resulting in an enhanced dynamic range. </p><p>The lower Figure shows that in addition to spatial antagonism, ganglion cells can also have spectral opponency. For instance, the left part of the lower figure illustrates a red-green opponent response with the center fed by positive input from an L-cone and the surrounding fed by a negative input from M-cones. On the other hand, the right part of the lower figure illustrates the off-center version of this cell. 
Hence, before the visual information has even left the retina, processing has already occurred, with a profound effect on color appearance. There are other types and varieties of ganglion-cell responses, but they all share these basic concepts.

[Figure: Antagonist receptive fields (on-center and off-center).]
typeof="mw:File"><a href="/wiki/File:ReceptiveField_onCenter_rg.pdf" class="mw-file-description"><img alt="Spectrally and spatially antagonist receptive fields (on center)" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/6b/ReceptiveField_onCenter_rg.pdf/page1-200px-ReceptiveField_onCenter_rg.pdf.jpg" decoding="async" width="200" height="201" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/6b/ReceptiveField_onCenter_rg.pdf/page1-300px-ReceptiveField_onCenter_rg.pdf.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6b/ReceptiveField_onCenter_rg.pdf/page1-400px-ReceptiveField_onCenter_rg.pdf.jpg 2x" data-file-width="597" data-file-height="600" /></a></span></div><div class="thumbcaption">On center</div></div><div class="tsingle" style="width:202px;max-width:202px"><div class="thumbimage"><span typeof="mw:File"><a href="/wiki/File:ReceptiveField_offCenter_rg.pdf" class="mw-file-description"><img alt="Spectrally and spatially antagonist receptive fields (off center)" src="//upload.wikimedia.org/wikipedia/commons/thumb/c/cd/ReceptiveField_offCenter_rg.pdf/page1-200px-ReceptiveField_offCenter_rg.pdf.jpg" decoding="async" width="200" height="199" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/c/cd/ReceptiveField_offCenter_rg.pdf/page1-300px-ReceptiveField_offCenter_rg.pdf.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/c/cd/ReceptiveField_offCenter_rg.pdf/page1-400px-ReceptiveField_offCenter_rg.pdf.jpg 2x" data-file-width="593" data-file-height="591" /></a></span></div><div class="thumbcaption">Off center</div></div></div><div class="trow" style="display:flex"><div class="thumbcaption">Spectrally and spatially antagonist receptive fields.</div></div></div></div> <p>On their way to the primary visual cortex, ganglion cell axons gather to form the optic nerve, which projects to the lateral geniculate nucleus (LGN) in the thalamus. Coding in the optic nerve is highly efficient, keeping the number of nerve fibers to a minimum (limited by the size of the optic nerve) and thereby also the size of the retinal blind spot as small as possible (approximately 5° wide by 7° high). Furthermore, the presented ganglion cells would have no response to uniform illumination, since the positive and negative areas are balanced. In other words, the transmitted signals are uncorrelated. For example, information from neighboring parts of natural scenes are highly correlated spatially and therefore highly predictable <sup id="cite_ref-kersten1987predictability_18-0" class="reference"><a href="#cite_note-kersten1987predictability-18"><span class="cite-bracket">&#91;</span>18<span class="cite-bracket">&#93;</span></a></sup>. Lateral inhibition between neighboring retinal ganglion cells minimizes this spatial correlation, therefore improving efficiency. We can see this as a process of image compression carried out in the retina. </p><p>Given the overlapping of the L- and M-cone absorption spectra, their signals are also highly correlated. In this case, coding efficiency is improved by combining the cone signals in order to minimize said correlation. We can understand this more easily using Principal Component Analysis (PCA). PCA is a statistical method used to reduce the dimensionality of a given set of variables by transforming the original variables, to a set of new variables, the principal components (PCs). 
The first PC accounts for a maximal amount of the total variance in the original variables, the second PC accounts for a maximal amount of the variance not accounted for by the first component, and so on. In addition, the PCs are linearly independent and orthogonal to each other in parameter space. PCA's main advantage is that only a few of the strongest PCs are enough to cover the vast majority of the system's variability [19]. This scheme has been applied to the cone absorption functions [20] and even to naturally occurring spectra [21],[22]. The PCs found in the space of cone excitations produced by natural objects are 1) a luminance axis where the L- and M-cone signals are added (L+M), 2) the difference of the L- and M-cone signals (L-M), and 3) a color axis where the S-cone signal is differenced from the sum of the L- and M-cone signals (S-(L+M)). These channels, derived from a mathematical/computational approach, coincide with the three retino-geniculate channels discovered in electrophysiological experiments [23],[24]. Through these mechanisms, redundant visual information is eliminated in the retina (a toy numerical illustration of this decorrelation closes this section).

Three channels of information actually communicate this information from the retina through the ganglion cells to the LGN. They differ not only in their chromatic properties but also in their anatomical substrate. These channels pose important limitations for basic color tasks, such as detection and discrimination.

In the first channel, the output of L- and M-cones is transmitted synergistically to diffuse bipolar cells and then to cells in the magnocellular (M-) layers of the LGN (not to be confused with the M-cones of the retina) [24]. The receptive fields of the M-cells are composed of a center and a surround, which are spatially antagonistic. M-cells have high contrast sensitivity for luminance stimuli, but they show no response at some combinations of L-M opponent inputs [25].
However, because the null points of different M-cells vary slightly, the population response is never really zero. This property is in fact passed on to cortical areas with predominantly M-cell inputs [26].

The parvocellular pathway (P-) originates with the individual outputs from L- or M-cones to midget bipolar cells, which provide input to retinal P-cells [11]. In the fovea, the receptive-field centers of P-cells are formed by single L- or M-cones. The structure of the P-cell receptive-field surround is still debated; however, the most widely accepted theory states that the surround consists of a specific cone type, resulting in a spatially opponent receptive field for luminance stimuli [27]. The parvocellular layers contribute about 80 % of the total projections from the retina to the LGN [28].

Finally, the more recently discovered koniocellular pathway (K-) carries mostly signals from S-cones [29]. Groups of this cone type project to special bipolar cells, which in turn provide input to specific small ganglion cells. These are usually not spatially opponent. The axons of the small ganglion cells project to thin layers of the LGN, adjacent to the parvocellular layers [30].

While the ganglion cells do terminate at the LGN (making synapses with LGN cells), there appears to be a one-to-one correspondence between ganglion cells and LGN cells, and the LGN appears to act as a relay station for the signals. Nonetheless, it probably serves some visual function, since there are neural projections from the cortex back to the LGN that could serve as some type of switching or adaptation feedback mechanism. The axons of LGN cells project to visual area one (V1) in the visual cortex, in the occipital lobe.
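To close this section, here is the toy illustration of the decorrelation idea promised in the PCA discussion above. The cone "catches" are simulated as a shared drive plus small cone-specific noise, mimicking the strong L/M correlation induced by their overlapping absorption spectra; all numbers are invented for illustration only.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 10_000

# Correlated toy cone catches: a shared component dominates, as the
# overlapping L- and M-absorption spectra would produce.
shared = rng.normal(size=n)
L = shared + 0.1 * rng.normal(size=n)
M = shared + 0.1 * rng.normal(size=n)

lum = L + M    # luminance channel (L+M)
opp = L - M    # red-green opponent channel (L-M)

print(np.corrcoef(L, M)[0, 1])      # ~0.99: raw cone signals are redundant
print(np.corrcoef(lum, opp)[0, 1])  # ~0.0: the recoded channels are not
```

The S-(L+M) axis plays the analogous role for the S-cone signal; together the three channels carry the same information with far less redundancy.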
</p> <div class="mw-heading mw-heading4"><h4 id="Color_Perception_at_the_Brain">Color Perception at the Brain</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;veaction=edit&amp;section=T-5" title="Edit section: Color Perception at the Brain" class="mw-editsection-visualeditor"><span>edit</span></a><span class="mw-editsection-divider"> | </span><a href="/w/index.php?title=Sensory_Systems/Visual_Color_Perception&amp;action=edit&amp;section=T-5" title="Edit section&#039;s source code: Color Perception at the Brain"><span>edit source</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In the cortex, the projections from the magno-, parvo-, and koniocellular pathways end in different layers of the primary visual cortex. The magnocellular fibers innervate principally layer 4Cα and layer 6. Parvocellular neurons project mostly to 4Cβ, and layers 4A and 6. Koniocellular neurons terminate in the cytochrome oxidase (CO-) rich blobs in layers 1, 2, and 3<sup id="cite_ref-callaway1998local_31-0" class="reference"><a href="#cite_note-callaway1998local-31"><span class="cite-bracket">&#91;</span>31<span class="cite-bracket">&#93;</span></a></sup>. </p><p>Once in the visual cortex, the encoding of visual information becomes significantly more complex. In the same way the outputs of various photoreceptors are combined and compared to produce ganglion cell responses, the outputs of various LGN cells are compared and combined to produce cortical responses. As the signals advance further up in the cortical processing chain, this process repeats itself with a rapidly increasing level of complexity to the point that receptive fields begin to lose meaning. However, some functions and processes have been identified and studied in specific regions of the visual cortex. </p><p>In the V1 region (striate cortex), double opponent neurons - neurons that have their receptive fields both chromatically and spatially opposite with respect to the on/off regions of a single receptive field - compare color signals across the visual space <sup id="cite_ref-conway2001spatial_32-0" class="reference"><a href="#cite_note-conway2001spatial-32"><span class="cite-bracket">&#91;</span>32<span class="cite-bracket">&#93;</span></a></sup>. They constitute between 5 to 10% of the cells in V1. Their coarse size and small percentage matches the poor spatial resolution of color vision <sup id="cite_ref-conway2009color_1-2" class="reference"><a href="#cite_note-conway2009color-1"><span class="cite-bracket">&#91;</span>1<span class="cite-bracket">&#93;</span></a></sup>. Furthermore, they are not sensitive to the direction of moving stimuli (unlike some other V1 neurons) and, hence, unlikely to contribute to motion perception<sup id="cite_ref-horwitz2005paucity_33-0" class="reference"><a href="#cite_note-horwitz2005paucity-33"><span class="cite-bracket">&#91;</span>33<span class="cite-bracket">&#93;</span></a></sup>. 
However, given their specialized receptive-field structure, these cells are the likely neural basis for color-contrast effects, as well as an efficient means of encoding color itself [34],[35]. Other V1 cells respond to other types of stimuli, such as oriented edges, various spatial and temporal frequencies, particular spatial locations, and combinations of these features, among others. Additionally, we can find cells that combine inputs from LGN cells linearly as well as cells that combine them nonlinearly. These responses are needed to support advanced visual capabilities, such as color itself.

[Figure 4: (Partial) flow diagram illustrating the many streams of visual information processing that take place in the visual cortex. Note that information can flow in both directions.]

There is substantially less information on the chromatic properties of single neurons in V2 than in V1. At first glance, it seems that there are no major differences in color coding between V1 and V2 [36]. One exception is the emergence of a new class of color-complex cell [37]. It has therefore been suggested that region V2 is involved in the elaboration of hue. However, this is still very controversial and has not been confirmed.

Following the modular concept developed after the discovery of functional ocular dominance in V1, and considering the anatomical segregation between the P-, M-, and K-pathways (described in the previous section), it was suggested that a specialized system devoted to the analysis of color information should exist within the visual cortex [38]. V4 is the region that has historically attracted the most attention as the possible "color area" of the brain.
This is because of an influential study claiming that V4 contained 100 % hue-selective cells [39]. However, this claim has been disputed by a number of subsequent studies, some even reporting that only 16 % of V4 neurons show hue tuning [40]. Currently, the most accepted view is that V4 contributes not only to color but also to shape perception, visual attention, and stereopsis. Furthermore, recent studies have focused on other brain regions in the search for the "color area" of the brain, such as TEO [41] and PITd [42]. The relationship of these regions to each other is still debated. To reconcile the discussion, some use the term posterior inferior temporal (PIT) cortex to denote the region that includes V4, TEO, and PITd [1].

If explaining the cortical responses of cells in V1, V2, and V4 is already a very complicated task, describing the complex visual responses of a network of approximately 30 visual zones is vastly harder. Figure 4 shows a small portion of the connectivity of the different cortical areas (not cells) that have been identified [43].

At this stage, it becomes exceedingly difficult to explain the function of single cortical cells in simple terms. As a matter of fact, the function of a single cell might not have meaning on its own, since the representation of the various perceptions must be distributed across collections of cells throughout the cortex.

==== Color Vision Adaptation Mechanisms ====

Although researchers have long been trying to explain the processing of color signals in the human visual system, it is important to understand that color perception is not a fixed process.
In fact, a variety of dynamic mechanisms serve to optimize the visual response according to the viewing environment. Of particular relevance to color perception are the mechanisms of dark, light, and chromatic adaptation.

===== Dark Adaptation =====

Dark adaptation refers to the change in visual sensitivity that occurs when the level of illumination is decreased. The visual system responds to reduced illumination by becoming more sensitive, increasing its capacity to produce a meaningful visual response even when the light conditions are suboptimal [44].

[Figure 5: Dark adaptation. During the first 10 minutes (to the left of the dotted line), sensitivity recovery is driven by the cones. After the first 10 minutes (to the right of the dotted line), the rods outperform the cones. Full sensitivity is recovered after approximately 30 minutes.]

Figure 5 shows the recovery of visual sensitivity after a transition from an extremely high illumination level to complete darkness [43]. First, the cones become gradually more sensitive, until the curve levels off after a couple of minutes. Then, after approximately 10 minutes, visual sensitivity is roughly constant. At that point, the rod system, with its longer recovery time, has recovered enough sensitivity to outperform the cones and take over control of the overall sensitivity. Rod sensitivity gradually improves as well, becoming asymptotic after about 30 minutes. In other words, the cones are responsible for the sensitivity recovery during the first 10 minutes.
Afterwards, the rods outperform the cones and reach full sensitivity after approximately 30 minutes.

This is only one of several neural mechanisms that adapt the visual system to dark lighting conditions as well as possible. Other such mechanisms include the well-known pupil reflex, depletion and regeneration of photopigment, gain control in retinal cells, other higher-level mechanisms, and cognitive interpretation.

===== Light Adaptation =====

Light adaptation is essentially the inverse process of dark adaptation. As a matter of fact, the underlying physiological mechanisms are the same for both processes. However, it is important to consider it separately, since its visual properties differ.

[Figure 6: Light adaptation. For a given scene, the solid lines represent families of visual response curves at different (relative) energy levels. The dashed line represents the case in which we would adapt so as to cover the entire range of illumination, which would yield limited contrast and reduced sensitivity.]

Light adaptation occurs when the level of illumination is increased. The visual system must therefore become less sensitive in order to produce useful perceptions, given that significantly more visible light is available. The visual system has a limited output dynamic range available for the signals that produce our perceptions, but the real world has illumination levels covering at least 10 orders of magnitude more. Fortunately, we rarely need to view the entire range of illumination levels at the same time.

At high light levels, adaptation is achieved by photopigment bleaching. This scales photon capture in the receptors and protects the cone response from saturating on bright backgrounds.
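A toy way to picture such multiplicative gain scaling is a saturating response function whose half-saturation point tracks the adapting background, so that each background level gets its own response curve (compare the family of solid curves in Figure 6). The sketch below uses a Naka-Rushton-style form with purely illustrative constants; it is a caricature of the mechanism, not a fitted model.

```python
def response(I, I_adapt, n=0.9, r_max=1.0):
    """Saturating (Naka-Rushton-style) response whose half-saturation
    constant follows the adapting intensity, sliding the curve along
    the (log) intensity axis."""
    sigma = I_adapt                       # gain set by the background level
    return r_max * I**n / (I**n + sigma**n)

# The same test intensity produces very different responses depending
# on the background level the system is adapted to:
for I_adapt in (1.0, 100.0, 10_000.0):
    print(I_adapt, round(response(200.0, I_adapt), 3))
```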
The mechanisms of light adaptation occur primarily within the retina [45]. As a matter of fact, gain changes are largely cone-specific, and adaptation pools signals over areas no larger than the diameter of individual cones [46],[47]. This points to a localization of light adaptation that may be as early as the receptors. However, there appears to be more than one site of sensitivity scaling: some of the gain changes are extremely rapid, while others take seconds or even minutes to stabilize [48]. Usually, light adaptation takes around 5 minutes (six times faster than dark adaptation), which may point to the influence of post-receptoral sites.

Figure 6 shows examples of light adaptation [43]. If we used a single response function to map the large range of intensities into the visual system's output, we would have only a very small range at our disposal for any given scene. It is clear that with such a response function, the perceived contrast of any given scene would be limited, and visual sensitivity to changes would be severely degraded by signal-to-noise issues. This case is shown by the dashed line. The solid lines, on the other hand, represent families of visual responses. These curves map the useful illumination range of any given scene into the full dynamic range of the visual output, thus resulting in the best possible visual perception for each situation. Light adaptation can be thought of as the process of sliding the visual response curve along the illumination-level axis until the optimum level for the given viewing conditions is reached.

===== Chromatic Adaptation =====

The general concept of chromatic adaptation is a variation in the heights of the three cone spectral responsivity curves. This adjustment arises because light adaptation occurs independently within each class of cone. A specific formulation of this hypothesis is known as von Kries adaptation.
This hypothesis states that the adaptation response takes place in each of the three cone types separately and is equivalent to multiplying their fixed spectral sensitivities by a scaling constant [49]. If the scaling weights (also known as von Kries coefficients) are inversely proportional to the absorption of light by each cone type (i.e. a lower absorption requires a larger coefficient), then von Kries scaling maintains a constant mean response within each cone class. This provides a simple yet powerful mechanism for maintaining the perceived color of objects despite changes in illumination. Under a number of different conditions, von Kries scaling provides a good account of the effects of light adaptation on color sensitivity and appearance [50],[51].

The easiest way to picture chromatic adaptation is to examine a white object under different types of illumination. For example, consider a piece of paper under daylight, fluorescent, and incandescent illumination. Daylight contains relatively far more short-wavelength energy than fluorescent light, and incandescent illumination contains relatively far more long-wavelength energy than fluorescent light. Yet, in spite of the different illumination conditions, the paper approximately retains its white appearance under all three light sources. This is because the S-cone system becomes relatively less sensitive under daylight (to compensate for the additional short-wavelength energy) and the L-cone system becomes relatively less sensitive under incandescent illumination (to compensate for the additional long-wavelength energy) [43].
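Von Kries scaling is simple enough to write down directly: divide each cone signal by (an estimate of) that cone class's response to the prevailing illuminant. The sketch below uses invented (L, M, S) numbers purely to illustrate how a white surface normalizes to the same adapted signal under different lights.

```python
import numpy as np

def von_kries(cone_signals, illuminant_response):
    """Scale each cone class by the inverse of its response to the
    prevailing illuminant (the von Kries coefficients)."""
    k = 1.0 / np.asarray(illuminant_response)
    return k * np.asarray(cone_signals)

# Invented (L, M, S) responses of a white paper under two illuminants:
paper_daylight = np.array([0.9, 1.0, 1.4])   # extra short-wavelength energy
paper_tungsten = np.array([1.5, 1.0, 0.6])   # extra long-wavelength energy

# Adapting to each illuminant maps the paper to the same signal:
print(von_kries(paper_daylight, paper_daylight))   # -> [1. 1. 1.]
print(von_kries(paper_tungsten, paper_tungsten))   # -> [1. 1. 1.]
```

Using the white surface itself as the illuminant estimate is of course a simplification; in practice the coefficients would come from some estimate of the prevailing light.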
References

1. Conway, Bevil R (2009). "Color vision, cones, and color-coding in the cortex". The Neuroscientist. 15: 274–290.
2. Russell, Richard and Sinha, Pawan (2007). "Real-world face recognition: The importance of surface reflectance properties". Perception. 36 (9).
3. Gegenfurtner, Karl R and Rieger, Jochem (2000). "Sensory and cognitive contributions of color to the recognition of natural scenes". Current Biology. 10 (13): 805–808.
4. Changizi, Mark A and Zhang, Qiong and Shimojo, Shinsuke (2006). "Bare skin, blood and the evolution of primate colour vision". Biology Letters. 2 (2): 217–221.
5. Beretta, Giordano (2000). Understanding Color. Hewlett-Packard.
6. Boynton, Robert M (1988). "Color vision". Annual Review of Psychology. 39 (1): 69–100.
7. Grassmann, Hermann (1853). "Zur Theorie der Farbenmischung". Annalen der Physik. 165 (5): 69–84.
8. König, Arthur and Dieterici, Conrad (1886). "Die Grundempfindungen und ihre Intensitäts-Vertheilung im Spectrum". Königlich Preussischen Akademie der Wissenschaften.
9. Smith, Vivianne C and Pokorny, Joel (1975). "Spectral sensitivity of the foveal cone photopigments between 400 and 500 nm". Vision Research. 15 (2): 161–171.
10. Vos, JJ and Walraven, PL (1971). "On the derivation of the foveal receptor primaries". Vision Research. 11 (8): 799–818.
11. Gegenfurtner, Karl R and Kiper, Daniel C (2003). "Color vision". Neuroscience. 26 (1): 181.
12. Kaiser, Peter K and Boynton, Robert M (1985). "Role of the blue mechanism in wavelength discrimination". Vision Research. 25 (4): 523–529.
13. Paulus, Walter and Kröger-Paulus, Angelika (1983). "A new concept of retinal colour coding". Vision Research. 23 (5): 529–540.
14. Nerger, Janice L and Cicerone, Carol M (1992). "The ratio of L cones to M cones in the human parafoveal retina". Vision Research. 32 (5): 879–888.
15. Neitz, Jay and Carroll, Joseph and Yamauchi, Yasuki and Neitz, Maureen and Williams, David R (2002). "Color perception is mediated by a plastic neural mechanism that is adjustable in adults". Neuron. 35 (4): 783–792.
16. Jacobs, Gerald H and Williams, Gary A and Cahill, Hugh and Nathans, Jeremy (2007). "Emergence of novel color vision in mice engineered to express a human cone photopigment". Science. 315 (5819): 1723–1725.
17. Osorio, D and Ruderman, DL and Cronin, TW (1998). "Estimation of errors in luminance signals encoded by primate retina resulting from sampling of natural images with red and green cones". JOSA A. 15 (1): 16–22.
18. Kersten, Daniel (1987). "Predictability and redundancy of natural images". JOSA A. 4 (12): 2395–2400.
19. Jolliffe, I. T. (2002). Principal Component Analysis. Springer.
20. Buchsbaum, Gershon and Gottschalk, A (1983). "Trichromacy, opponent colours coding and optimum colour information transmission in the retina". Proceedings of the Royal Society of London. Series B. Biological Sciences. 220 (1218): 89–113.
21. Zaidi, Qasim (1997). "Decorrelation of L- and M-cone signals". JOSA A. 14 (12): 3430–3431.
22. Ruderman, Daniel L and Cronin, Thomas W and Chiao, Chuan-Chin (1998). "Statistics of cone responses to natural images: Implications for visual coding". JOSA A. 15 (8): 2036–2045.
23. Lee, BB and Martin, PR and Valberg, A (1988). "The physiological basis of heterochromatic flicker photometry demonstrated in the ganglion cells of the macaque retina". The Journal of Physiology. 404 (1): 323–347.
24. Derrington, Andrew M and Krauskopf, John and Lennie, Peter (1984). "Chromatic mechanisms in lateral geniculate nucleus of macaque". The Journal of Physiology. 357 (1): 241–265.
25. Shapley, Robert (1990). "Visual sensitivity and parallel retinocortical channels". Annual Review of Psychology. 41 (1): 635–658.
26. Dobkins, Karen R and Thiele, Alex and Albright, Thomas D (2000). "Comparison of red–green equiluminance points in humans and macaques: evidence for different L:M cone ratios between species". JOSA A. 17 (3): 545–556.
27. Martin, Paul R and Lee, Barry B and White, Andrew JR and Solomon, Samuel G and Rüttiger, Lukas (2001). "Chromatic sensitivity of ganglion cells in the peripheral primate retina". Nature. 410 (6831): 933–936.
28. Perry, VH and Oehler, R and Cowey, A (1984). "Retinal ganglion cells that project to the dorsal lateral geniculate nucleus in the macaque monkey". Neuroscience. 12 (4): 1101–1123.
29. Casagrande, VA (1994). "A third parallel visual pathway to primate area V1". Trends in Neurosciences. 17 (7): 305–310.
30. Hendry, Stewart HC and Reid, R Clay (2000). "The koniocellular pathway in primate vision". Annual Review of Neuroscience. 23 (1): 127–153.
31. Callaway, Edward M (1998). "Local circuits in primary visual cortex of the macaque monkey". Annual Review of Neuroscience. 21 (1): 47–74.
32. Conway, Bevil R (2001). "Spatial structure of cone inputs to color cells in alert macaque primary visual cortex (V-1)". The Journal of Neuroscience. 21 (8): 2768–2783.
33. Horwitz, Gregory D and Albright, Thomas D (2005). "Paucity of chromatic linear motion detectors in macaque V1". Journal of Vision. 5 (6).
34. Danilova, Marina V and Mollon, JD (2006). "The comparison of spatially separated colours". Vision Research. 46 (6): 823–836.
35. Wachtler, Thomas and Sejnowski, Terrence J and Albright, Thomas D (2003). "Representation of color stimuli in awake macaque primary visual cortex". Neuron. 37 (4): 681–691.
36. Solomon, Samuel G and Lennie, Peter (2005). "Chromatic gain controls in visual cortical neurons". The Journal of Neuroscience. 25 (19): 4779–4792.
37. Hubel, David H (1995). Eye, Brain, and Vision. Scientific American Library/Scientific American Books.
38. Livingstone, Margaret S and Hubel, David H (1987). "Psychophysical evidence for separate channels for the perception of form, color, movement, and depth". The Journal of Neuroscience. 7 (11): 3416–3468.
39. Zeki, Semir M (1973). "Colour coding in rhesus monkey prestriate cortex". Brain Research. 53 (2): 422–427.
40. Conway, Bevil R and Tsao, Doris Y (2006). "Color architecture in alert macaque cortex revealed by fMRI". Cerebral Cortex. 16 (11): 1604–1613.
41. Tootell, Roger BH and Nelissen, Koen and Vanduffel, Wim and Orban, Guy A (2004). "Search for color 'center(s)' in macaque visual cortex". Cerebral Cortex. 14 (4): 353–363.
42. Conway, Bevil R and Moeller, Sebastian and Tsao, Doris Y (2007). "Specialized color modules in macaque extrastriate cortex". Neuron. 56 (3): 560–573.
43. Fairchild, Mark D (2013). Color Appearance Models. John Wiley & Sons.
44. Webster, Michael A (1996). "Human colour perception and its adaptation". Network: Computation in Neural Systems. 7 (4): 587–634.
45. Shapley, Robert and Enroth-Cugell, Christina (1984). "Visual adaptation and retinal gain controls". Progress in Retinal Research. 3: 263–346.
46. Chaparro, A and Stromeyer III, CF and Chen, G and Kronauer, RE (1995). "Human cones appear to adapt at low light levels: Measurements on the red-green detection mechanism". Vision Research. 35 (22): 3103–3118.
47. Macleod, Donald IA and Williams, David R and Makous, Walter (1992). "A visual nonlinearity fed by single cones". Vision Research. 32 (2): 347–363.
48. Hayhoe, Mary (1991). Adaptation Mechanisms in Color and Brightness. Springer.
49. MacAdam, David L (1970). Sources of Color Science. MIT Press.
50. Webster, Michael A and Mollon, JD (1995). "Colour constancy influenced by contrast adaptation". Nature. 373 (6516): 694–698.
51. Brainard, David H and Wandell, Brian A (1992). "Asymmetric color matching: how color appearance depends on the illuminant". JOSA A. 9 (9): 1443–1448.
